xref: /openbmc/linux/drivers/clk/clk.c (revision 94c7b6fc)
1 /*
2  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * Standard functionality for the common clock API.  See Documentation/clk.txt
10  */
11 
12 #include <linux/clk-private.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/spinlock.h>
16 #include <linux/err.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/device.h>
21 #include <linux/init.h>
22 #include <linux/sched.h>
23 
24 #include "clk.h"
25 
26 static DEFINE_SPINLOCK(enable_lock);
27 static DEFINE_MUTEX(prepare_lock);
28 
29 static struct task_struct *prepare_owner;
30 static struct task_struct *enable_owner;
31 
32 static int prepare_refcnt;
33 static int enable_refcnt;
34 
35 static HLIST_HEAD(clk_root_list);
36 static HLIST_HEAD(clk_orphan_list);
37 static LIST_HEAD(clk_notifier_list);
38 
39 /***           locking             ***/
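/*
 * The prepare_lock mutex and enable_lock spinlock below are reentrant for
 * the task that owns them: the *_owner and *_refcnt bookkeeping lets core
 * code nest lock acquisitions (e.g. clk_set_rate() internally taking paths
 * that call __clk_prepare()) without deadlocking, while the WARN_ON_ONCE
 * checks catch any inconsistent hand-off of ownership.
 */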
40 static void clk_prepare_lock(void)
41 {
42 	if (!mutex_trylock(&prepare_lock)) {
43 		if (prepare_owner == current) {
44 			prepare_refcnt++;
45 			return;
46 		}
47 		mutex_lock(&prepare_lock);
48 	}
49 	WARN_ON_ONCE(prepare_owner != NULL);
50 	WARN_ON_ONCE(prepare_refcnt != 0);
51 	prepare_owner = current;
52 	prepare_refcnt = 1;
53 }
54 
55 static void clk_prepare_unlock(void)
56 {
57 	WARN_ON_ONCE(prepare_owner != current);
58 	WARN_ON_ONCE(prepare_refcnt == 0);
59 
60 	if (--prepare_refcnt)
61 		return;
62 	prepare_owner = NULL;
63 	mutex_unlock(&prepare_lock);
64 }
65 
66 static unsigned long clk_enable_lock(void)
67 {
68 	unsigned long flags;
69 
70 	if (!spin_trylock_irqsave(&enable_lock, flags)) {
71 		if (enable_owner == current) {
72 			enable_refcnt++;
73 			return flags;
74 		}
75 		spin_lock_irqsave(&enable_lock, flags);
76 	}
77 	WARN_ON_ONCE(enable_owner != NULL);
78 	WARN_ON_ONCE(enable_refcnt != 0);
79 	enable_owner = current;
80 	enable_refcnt = 1;
81 	return flags;
82 }
83 
84 static void clk_enable_unlock(unsigned long flags)
85 {
86 	WARN_ON_ONCE(enable_owner != current);
87 	WARN_ON_ONCE(enable_refcnt == 0);
88 
89 	if (--enable_refcnt)
90 		return;
91 	enable_owner = NULL;
92 	spin_unlock_irqrestore(&enable_lock, flags);
93 }
94 
95 /***        debugfs support        ***/
96 
97 #ifdef CONFIG_DEBUG_FS
98 #include <linux/debugfs.h>
99 
100 static struct dentry *rootdir;
101 static struct dentry *orphandir;
102 static int inited;
103 
104 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
105 {
106 	if (!c)
107 		return;
108 
109 	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu\n",
110 		   level * 3 + 1, "",
111 		   30 - level * 3, c->name,
112 		   c->enable_count, c->prepare_count, clk_get_rate(c),
113 		   clk_get_accuracy(c));
114 }
115 
116 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
117 				     int level)
118 {
119 	struct clk *child;
120 
121 	if (!c)
122 		return;
123 
124 	clk_summary_show_one(s, c, level);
125 
126 	hlist_for_each_entry(child, &c->children, child_node)
127 		clk_summary_show_subtree(s, child, level + 1);
128 }
129 
130 static int clk_summary_show(struct seq_file *s, void *data)
131 {
132 	struct clk *c;
133 
134 	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy\n");
135 	seq_puts(s, "--------------------------------------------------------------------------------\n");
136 
137 	clk_prepare_lock();
138 
139 	hlist_for_each_entry(c, &clk_root_list, child_node)
140 		clk_summary_show_subtree(s, c, 0);
141 
142 	hlist_for_each_entry(c, &clk_orphan_list, child_node)
143 		clk_summary_show_subtree(s, c, 0);
144 
145 	clk_prepare_unlock();
146 
147 	return 0;
148 }
149 
150 
151 static int clk_summary_open(struct inode *inode, struct file *file)
152 {
153 	return single_open(file, clk_summary_show, inode->i_private);
154 }
155 
156 static const struct file_operations clk_summary_fops = {
157 	.open		= clk_summary_open,
158 	.read		= seq_read,
159 	.llseek		= seq_lseek,
160 	.release	= single_release,
161 };
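
/*
 * Illustrative only: with debugfs mounted (typically at /sys/kernel/debug),
 * the summary above can be read from userspace.  The "osc" row below is a
 * hypothetical example, not output from a real clk tree:
 *
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	   clock                         enable_cnt  prepare_cnt        rate   accuracy
 *	--------------------------------------------------------------------------------
 *	   osc                                    1            1    24000000          0
 */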
162 
163 static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
164 {
165 	if (!c)
166 		return;
167 
168 	seq_printf(s, "\"%s\": { ", c->name);
169 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
170 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
171 	seq_printf(s, "\"rate\": %lu,", clk_get_rate(c));
172 	seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
173 }
174 
175 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
176 {
177 	struct clk *child;
178 
179 	if (!c)
180 		return;
181 
182 	clk_dump_one(s, c, level);
183 
184 	hlist_for_each_entry(child, &c->children, child_node) {
185 		seq_printf(s, ",");
186 		clk_dump_subtree(s, child, level + 1);
187 	}
188 
189 	seq_printf(s, "}");
190 }
191 
192 static int clk_dump(struct seq_file *s, void *data)
193 {
194 	struct clk *c;
195 	bool first_node = true;
196 
197 	seq_printf(s, "{");
198 
199 	clk_prepare_lock();
200 
201 	hlist_for_each_entry(c, &clk_root_list, child_node) {
202 		if (!first_node)
203 			seq_printf(s, ",");
204 		first_node = false;
205 		clk_dump_subtree(s, c, 0);
206 	}
207 
208 	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
209 		seq_printf(s, ",");
210 		clk_dump_subtree(s, c, 0);
211 	}
212 
213 	clk_prepare_unlock();
214 
215 	seq_printf(s, "}");
216 	return 0;
217 }
218 
219 
220 static int clk_dump_open(struct inode *inode, struct file *file)
221 {
222 	return single_open(file, clk_dump, inode->i_private);
223 }
224 
225 static const struct file_operations clk_dump_fops = {
226 	.open		= clk_dump_open,
227 	.read		= seq_read,
228 	.llseek		= seq_lseek,
229 	.release	= single_release,
230 };
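
/*
 * Illustrative only: clk_dump emits the whole tree as one JSON object with
 * a nested object per clock, e.g. (hypothetical clock name):
 *
 *	{"osc": { "enable_count": 1,"prepare_count": 1,"rate": 24000000,"accuracy": 0}}
 */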
231 
232 /* caller must hold prepare_lock */
233 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
234 {
235 	struct dentry *d;
236 	int ret = -ENOMEM;
237 
238 	if (!clk || !pdentry) {
239 		ret = -EINVAL;
240 		goto out;
241 	}
242 
243 	d = debugfs_create_dir(clk->name, pdentry);
244 	if (!d)
245 		goto out;
246 
247 	clk->dentry = d;
248 
249 	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
250 			(u32 *)&clk->rate);
251 	if (!d)
252 		goto err_out;
253 
254 	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
255 			(u32 *)&clk->accuracy);
256 	if (!d)
257 		goto err_out;
258 
259 	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
260 			(u32 *)&clk->flags);
261 	if (!d)
262 		goto err_out;
263 
264 	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
265 			(u32 *)&clk->prepare_count);
266 	if (!d)
267 		goto err_out;
268 
269 	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
270 			(u32 *)&clk->enable_count);
271 	if (!d)
272 		goto err_out;
273 
274 	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
275 			(u32 *)&clk->notifier_count);
276 	if (!d)
277 		goto err_out;
278 
279 	if (clk->ops->debug_init)
280 		if (clk->ops->debug_init(clk->hw, clk->dentry))
281 			goto err_out;
282 
283 	ret = 0;
284 	goto out;
285 
286 err_out:
287 	debugfs_remove_recursive(clk->dentry);
288 	clk->dentry = NULL;
289 out:
290 	return ret;
291 }
292 
293 /* caller must hold prepare_lock */
294 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
295 {
296 	struct clk *child;
297 	int ret = -EINVAL;
298 
299 	if (!clk || !pdentry)
300 		goto out;
301 
302 	ret = clk_debug_create_one(clk, pdentry);
303 
304 	if (ret)
305 		goto out;
306 
307 	hlist_for_each_entry(child, &clk->children, child_node)
308 		clk_debug_create_subtree(child, clk->dentry);
309 
310 	ret = 0;
311 out:
312 	return ret;
313 }
314 
315 /**
316  * clk_debug_register - add a clk node to the debugfs clk tree
317  * @clk: the clk being added to the debugfs clk tree
318  *
319  * Dynamically adds a clk to the debugfs clk tree if debugfs has been
320  * initialized.  Otherwise it bails out early since the debugfs clk tree
321  * will be created lazily by clk_debug_init as part of a late_initcall.
322  *
323  * Caller must hold prepare_lock.  So far only __clk_init calls this
324  * function, so that requirement is taken care of.
325  */
326 static int clk_debug_register(struct clk *clk)
327 {
328 	struct clk *parent;
329 	struct dentry *pdentry;
330 	int ret = 0;
331 
332 	if (!inited)
333 		goto out;
334 
335 	parent = clk->parent;
336 
337 	/*
338 	 * Check to see if a clk is a root clk.  Also check that it is
339 	 * safe to add this clk to debugfs
340 	 */
341 	if (!parent)
342 		if (clk->flags & CLK_IS_ROOT)
343 			pdentry = rootdir;
344 		else
345 			pdentry = orphandir;
346 	else
347 		if (parent->dentry)
348 			pdentry = parent->dentry;
349 		else
350 			goto out;
351 
352 	ret = clk_debug_create_subtree(clk, pdentry);
353 
354 out:
355 	return ret;
356 }
357 
358 /**
359  * clk_debug_unregister - remove a clk node from the debugfs clk tree
360  * @clk: the clk being removed from the debugfs clk tree
361  *
362  * Dynamically removes a clk and all its child clk nodes from the
363  * debugfs clk tree if clk->dentry points to debugfs created by
364  * clk_debug_register in __clk_init.
365  *
366  * Caller must hold prepare_lock.
367  */
368 static void clk_debug_unregister(struct clk *clk)
369 {
370 	debugfs_remove_recursive(clk->dentry);
371 }
372 
373 /**
374  * clk_debug_reparent - reparent clk node in the debugfs clk tree
375  * @clk: the clk being reparented
376  * @new_parent: the new clk parent, may be NULL
377  *
378  * Rename clk entry in the debugfs clk tree if debugfs has been
379  * initialized.  Otherwise it bails out early since the debugfs clk tree
380  * will be created lazily by clk_debug_init as part of a late_initcall.
381  *
382  * Caller must hold prepare_lock.
383  */
384 static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
385 {
386 	struct dentry *d;
387 	struct dentry *new_parent_d;
388 
389 	if (!inited)
390 		return;
391 
392 	if (new_parent)
393 		new_parent_d = new_parent->dentry;
394 	else
395 		new_parent_d = orphandir;
396 
397 	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
398 			new_parent_d, clk->name);
399 	if (d)
400 		clk->dentry = d;
401 	else
402 		pr_debug("%s: failed to rename debugfs entry for %s\n",
403 				__func__, clk->name);
404 }
405 
406 /**
407  * clk_debug_init - lazily create the debugfs clk tree visualization
408  *
409  * clks are often initialized very early during boot before memory can
410  * be dynamically allocated and well before debugfs is setup.
411  * clk_debug_init walks the clk tree hierarchy while holding
412  * prepare_lock and creates the topology as part of a late_initcall,
413  * thus ensuring that clks initialized very early will still be
414  * represented in the debugfs clk tree.  This function should only be
415  * called once at boot-time, and all other clks added dynamically will
416  * be done so with clk_debug_register.
417  */
418 static int __init clk_debug_init(void)
419 {
420 	struct clk *clk;
421 	struct dentry *d;
422 
423 	rootdir = debugfs_create_dir("clk", NULL);
424 
425 	if (!rootdir)
426 		return -ENOMEM;
427 
428 	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
429 				&clk_summary_fops);
430 	if (!d)
431 		return -ENOMEM;
432 
433 	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
434 				&clk_dump_fops);
435 	if (!d)
436 		return -ENOMEM;
437 
438 	orphandir = debugfs_create_dir("orphans", rootdir);
439 
440 	if (!orphandir)
441 		return -ENOMEM;
442 
443 	clk_prepare_lock();
444 
445 	hlist_for_each_entry(clk, &clk_root_list, child_node)
446 		clk_debug_create_subtree(clk, rootdir);
447 
448 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
449 		clk_debug_create_subtree(clk, orphandir);
450 
451 	inited = 1;
452 
453 	clk_prepare_unlock();
454 
455 	return 0;
456 }
457 late_initcall(clk_debug_init);
458 #else
459 static inline int clk_debug_register(struct clk *clk) { return 0; }
460 static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
461 {
462 }
463 static inline void clk_debug_unregister(struct clk *clk)
464 {
465 }
466 #endif
467 
468 /* caller must hold prepare_lock */
469 static void clk_unprepare_unused_subtree(struct clk *clk)
470 {
471 	struct clk *child;
472 
473 	if (!clk)
474 		return;
475 
476 	hlist_for_each_entry(child, &clk->children, child_node)
477 		clk_unprepare_unused_subtree(child);
478 
479 	if (clk->prepare_count)
480 		return;
481 
482 	if (clk->flags & CLK_IGNORE_UNUSED)
483 		return;
484 
485 	if (__clk_is_prepared(clk)) {
486 		if (clk->ops->unprepare_unused)
487 			clk->ops->unprepare_unused(clk->hw);
488 		else if (clk->ops->unprepare)
489 			clk->ops->unprepare(clk->hw);
490 	}
491 }
492 
493 /* caller must hold prepare_lock */
494 static void clk_disable_unused_subtree(struct clk *clk)
495 {
496 	struct clk *child;
497 	unsigned long flags;
498 
499 	if (!clk)
500 		goto out;
501 
502 	hlist_for_each_entry(child, &clk->children, child_node)
503 		clk_disable_unused_subtree(child);
504 
505 	flags = clk_enable_lock();
506 
507 	if (clk->enable_count)
508 		goto unlock_out;
509 
510 	if (clk->flags & CLK_IGNORE_UNUSED)
511 		goto unlock_out;
512 
513 	/*
514 	 * some gate clocks have special needs during the disable-unused
515 	 * sequence.  call .disable_unused if available, otherwise fall
516 	 * back to .disable
517 	 */
518 	if (__clk_is_enabled(clk)) {
519 		if (clk->ops->disable_unused)
520 			clk->ops->disable_unused(clk->hw);
521 		else if (clk->ops->disable)
522 			clk->ops->disable(clk->hw);
523 	}
524 
525 unlock_out:
526 	clk_enable_unlock(flags);
527 
528 out:
529 	return;
530 }
531 
532 static bool clk_ignore_unused;
533 static int __init clk_ignore_unused_setup(char *__unused)
534 {
535 	clk_ignore_unused = true;
536 	return 1;
537 }
538 __setup("clk_ignore_unused", clk_ignore_unused_setup);
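
/*
 * Illustrative only: booting with "clk_ignore_unused" on the kernel command
 * line skips the gate-unused-clocks pass below, which can help when bringing
 * up a driver that does not yet call clk_prepare_enable() on its clocks.
 */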
539 
540 static int clk_disable_unused(void)
541 {
542 	struct clk *clk;
543 
544 	if (clk_ignore_unused) {
545 		pr_warn("clk: Not disabling unused clocks\n");
546 		return 0;
547 	}
548 
549 	clk_prepare_lock();
550 
551 	hlist_for_each_entry(clk, &clk_root_list, child_node)
552 		clk_disable_unused_subtree(clk);
553 
554 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
555 		clk_disable_unused_subtree(clk);
556 
557 	hlist_for_each_entry(clk, &clk_root_list, child_node)
558 		clk_unprepare_unused_subtree(clk);
559 
560 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
561 		clk_unprepare_unused_subtree(clk);
562 
563 	clk_prepare_unlock();
564 
565 	return 0;
566 }
567 late_initcall_sync(clk_disable_unused);
568 
569 /***    helper functions   ***/
570 
571 const char *__clk_get_name(struct clk *clk)
572 {
573 	return !clk ? NULL : clk->name;
574 }
575 EXPORT_SYMBOL_GPL(__clk_get_name);
576 
577 struct clk_hw *__clk_get_hw(struct clk *clk)
578 {
579 	return !clk ? NULL : clk->hw;
580 }
581 EXPORT_SYMBOL_GPL(__clk_get_hw);
582 
583 u8 __clk_get_num_parents(struct clk *clk)
584 {
585 	return !clk ? 0 : clk->num_parents;
586 }
587 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
588 
589 struct clk *__clk_get_parent(struct clk *clk)
590 {
591 	return !clk ? NULL : clk->parent;
592 }
593 EXPORT_SYMBOL_GPL(__clk_get_parent);
594 
595 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
596 {
597 	if (!clk || index >= clk->num_parents)
598 		return NULL;
599 	else if (!clk->parents)
600 		return __clk_lookup(clk->parent_names[index]);
601 	else if (!clk->parents[index])
602 		return clk->parents[index] =
603 			__clk_lookup(clk->parent_names[index]);
604 	else
605 		return clk->parents[index];
606 }
607 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
608 
609 unsigned int __clk_get_enable_count(struct clk *clk)
610 {
611 	return !clk ? 0 : clk->enable_count;
612 }
613 
614 unsigned int __clk_get_prepare_count(struct clk *clk)
615 {
616 	return !clk ? 0 : clk->prepare_count;
617 }
618 
619 unsigned long __clk_get_rate(struct clk *clk)
620 {
621 	unsigned long ret;
622 
623 	if (!clk) {
624 		ret = 0;
625 		goto out;
626 	}
627 
628 	ret = clk->rate;
629 
630 	if (clk->flags & CLK_IS_ROOT)
631 		goto out;
632 
633 	if (!clk->parent)
634 		ret = 0;
635 
636 out:
637 	return ret;
638 }
639 EXPORT_SYMBOL_GPL(__clk_get_rate);
640 
641 unsigned long __clk_get_accuracy(struct clk *clk)
642 {
643 	if (!clk)
644 		return 0;
645 
646 	return clk->accuracy;
647 }
648 
649 unsigned long __clk_get_flags(struct clk *clk)
650 {
651 	return !clk ? 0 : clk->flags;
652 }
653 EXPORT_SYMBOL_GPL(__clk_get_flags);
654 
655 bool __clk_is_prepared(struct clk *clk)
656 {
657 	int ret;
658 
659 	if (!clk)
660 		return false;
661 
662 	/*
663 	 * .is_prepared is optional for clocks that can prepare
664 	 * fall back to software usage counter if it is missing
665 	 */
666 	if (!clk->ops->is_prepared) {
667 		ret = clk->prepare_count ? 1 : 0;
668 		goto out;
669 	}
670 
671 	ret = clk->ops->is_prepared(clk->hw);
672 out:
673 	return !!ret;
674 }
675 
676 bool __clk_is_enabled(struct clk *clk)
677 {
678 	int ret;
679 
680 	if (!clk)
681 		return false;
682 
683 	/*
684 	 * .is_enabled is only mandatory for clocks that gate
685 	 * fall back to software usage counter if .is_enabled is missing
686 	 */
687 	if (!clk->ops->is_enabled) {
688 		ret = clk->enable_count ? 1 : 0;
689 		goto out;
690 	}
691 
692 	ret = clk->ops->is_enabled(clk->hw);
693 out:
694 	return !!ret;
695 }
696 EXPORT_SYMBOL_GPL(__clk_is_enabled);
697 
698 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
699 {
700 	struct clk *child;
701 	struct clk *ret;
702 
703 	if (!strcmp(clk->name, name))
704 		return clk;
705 
706 	hlist_for_each_entry(child, &clk->children, child_node) {
707 		ret = __clk_lookup_subtree(name, child);
708 		if (ret)
709 			return ret;
710 	}
711 
712 	return NULL;
713 }
714 
715 struct clk *__clk_lookup(const char *name)
716 {
717 	struct clk *root_clk;
718 	struct clk *ret;
719 
720 	if (!name)
721 		return NULL;
722 
723 	/* search the 'proper' clk tree first */
724 	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
725 		ret = __clk_lookup_subtree(name, root_clk);
726 		if (ret)
727 			return ret;
728 	}
729 
730 	/* if not found, then search the orphan tree */
731 	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
732 		ret = __clk_lookup_subtree(name, root_clk);
733 		if (ret)
734 			return ret;
735 	}
736 
737 	return NULL;
738 }
739 
740 /*
741  * Helper for finding best parent to provide a given frequency. This can be used
742  * directly as a determine_rate callback (e.g. for a mux), or from a more
743  * complex clock that may combine a mux with other operations.
744  */
745 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
746 			      unsigned long *best_parent_rate,
747 			      struct clk **best_parent_p)
748 {
749 	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
750 	int i, num_parents;
751 	unsigned long parent_rate, best = 0;
752 
753 	/* if NO_REPARENT flag set, pass through to current parent */
754 	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
755 		parent = clk->parent;
756 		if (clk->flags & CLK_SET_RATE_PARENT)
757 			best = __clk_round_rate(parent, rate);
758 		else if (parent)
759 			best = __clk_get_rate(parent);
760 		else
761 			best = __clk_get_rate(clk);
762 		goto out;
763 	}
764 
765 	/* find the parent that can provide the fastest rate <= rate */
766 	num_parents = clk->num_parents;
767 	for (i = 0; i < num_parents; i++) {
768 		parent = clk_get_parent_by_index(clk, i);
769 		if (!parent)
770 			continue;
771 		if (clk->flags & CLK_SET_RATE_PARENT)
772 			parent_rate = __clk_round_rate(parent, rate);
773 		else
774 			parent_rate = __clk_get_rate(parent);
775 		if (parent_rate <= rate && parent_rate > best) {
776 			best_parent = parent;
777 			best = parent_rate;
778 		}
779 	}
780 
781 out:
782 	if (best_parent)
783 		*best_parent_p = best_parent;
784 	*best_parent_rate = best;
785 
786 	return best;
787 }
788 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
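
/*
 * A minimal sketch (not from this file): a mux-like provider can plug the
 * helper above straight into its clk_ops; the my_mux_* callbacks are
 * hypothetical names.
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,
 *		.set_parent	= my_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 */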
789 
790 /***        clk api        ***/
791 
792 void __clk_unprepare(struct clk *clk)
793 {
794 	if (!clk)
795 		return;
796 
797 	if (WARN_ON(clk->prepare_count == 0))
798 		return;
799 
800 	if (--clk->prepare_count > 0)
801 		return;
802 
803 	WARN_ON(clk->enable_count > 0);
804 
805 	if (clk->ops->unprepare)
806 		clk->ops->unprepare(clk->hw);
807 
808 	__clk_unprepare(clk->parent);
809 }
810 
811 /**
812  * clk_unprepare - undo preparation of a clock source
813  * @clk: the clk being unprepared
814  *
815  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
816  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
817  * if the operation may sleep.  One example is a clk which is accessed over
818  * I2C.  In the complex case a clk gate operation may require a fast and a slow
819  * part.  For this reason clk_unprepare and clk_disable are not mutually
820  * exclusive.  In fact clk_disable must be called before clk_unprepare.
821  */
822 void clk_unprepare(struct clk *clk)
823 {
824 	if (IS_ERR_OR_NULL(clk))
825 		return;
826 
827 	clk_prepare_lock();
828 	__clk_unprepare(clk);
829 	clk_prepare_unlock();
830 }
831 EXPORT_SYMBOL_GPL(clk_unprepare);
832 
833 int __clk_prepare(struct clk *clk)
834 {
835 	int ret = 0;
836 
837 	if (!clk)
838 		return 0;
839 
840 	if (clk->prepare_count == 0) {
841 		ret = __clk_prepare(clk->parent);
842 		if (ret)
843 			return ret;
844 
845 		if (clk->ops->prepare) {
846 			ret = clk->ops->prepare(clk->hw);
847 			if (ret) {
848 				__clk_unprepare(clk->parent);
849 				return ret;
850 			}
851 		}
852 	}
853 
854 	clk->prepare_count++;
855 
856 	return 0;
857 }
858 
859 /**
860  * clk_prepare - prepare a clock source
861  * @clk: the clk being prepared
862  *
863  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
864  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
865  * operation may sleep.  One example is a clk which is accessed over I2C.  In
866  * the complex case a clk ungate operation may require a fast and a slow part.
867  * For this reason clk_prepare and clk_enable are not mutually
868  * exclusive.  In fact clk_prepare must be called before clk_enable.
869  * Returns 0 on success, a negative errno otherwise.
870  */
871 int clk_prepare(struct clk *clk)
872 {
873 	int ret;
874 
875 	clk_prepare_lock();
876 	ret = __clk_prepare(clk);
877 	clk_prepare_unlock();
878 
879 	return ret;
880 }
881 EXPORT_SYMBOL_GPL(clk_prepare);
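
/*
 * A minimal consumer sketch (hypothetical device and clock names): the
 * prepare and enable phases pair up as described above and unwind in
 * reverse order:
 *
 *	struct clk *clk = devm_clk_get(dev, "bus");
 *
 *	ret = clk_prepare(clk);		(may sleep)
 *	ret = clk_enable(clk);		(must not sleep)
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */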
882 
883 static void __clk_disable(struct clk *clk)
884 {
885 	if (!clk)
886 		return;
887 
888 	if (WARN_ON(clk->enable_count == 0))
889 		return;
890 
891 	if (--clk->enable_count > 0)
892 		return;
893 
894 	if (clk->ops->disable)
895 		clk->ops->disable(clk->hw);
896 
897 	__clk_disable(clk->parent);
898 }
899 
900 /**
901  * clk_disable - gate a clock
902  * @clk: the clk being gated
903  *
904  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
905  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
906  * clk if the operation is fast and will never sleep.  One example is a
907  * SoC-internal clk which is controlled via simple register writes.  In the
908  * complex case a clk gate operation may require a fast and a slow part.  For
909  * this reason clk_unprepare and clk_disable are not mutually exclusive.
910  * In fact clk_disable must be called before clk_unprepare.
911  */
912 void clk_disable(struct clk *clk)
913 {
914 	unsigned long flags;
915 
916 	if (IS_ERR_OR_NULL(clk))
917 		return;
918 
919 	flags = clk_enable_lock();
920 	__clk_disable(clk);
921 	clk_enable_unlock(flags);
922 }
923 EXPORT_SYMBOL_GPL(clk_disable);
924 
925 static int __clk_enable(struct clk *clk)
926 {
927 	int ret = 0;
928 
929 	if (!clk)
930 		return 0;
931 
932 	if (WARN_ON(clk->prepare_count == 0))
933 		return -ESHUTDOWN;
934 
935 	if (clk->enable_count == 0) {
936 		ret = __clk_enable(clk->parent);
937 
938 		if (ret)
939 			return ret;
940 
941 		if (clk->ops->enable) {
942 			ret = clk->ops->enable(clk->hw);
943 			if (ret) {
944 				__clk_disable(clk->parent);
945 				return ret;
946 			}
947 		}
948 	}
949 
950 	clk->enable_count++;
951 	return 0;
952 }
953 
954 /**
955  * clk_enable - ungate a clock
956  * @clk: the clk being ungated
957  *
958  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
959  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
960  * if the operation will never sleep.  One example is a SoC-internal clk which
961  * is controlled via simple register writes.  In the complex case a clk ungate
962  * operation may require a fast and a slow part.  For this reason
963  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
964  * must be called before clk_enable.  Returns 0 on success, a negative errno
965  * otherwise.
966  */
967 int clk_enable(struct clk *clk)
968 {
969 	unsigned long flags;
970 	int ret;
971 
972 	flags = clk_enable_lock();
973 	ret = __clk_enable(clk);
974 	clk_enable_unlock(flags);
975 
976 	return ret;
977 }
978 EXPORT_SYMBOL_GPL(clk_enable);
979 
980 /**
981  * __clk_round_rate - round the given rate for a clk
982  * @clk: round the rate of this clock
983  * @rate: the rate which is to be rounded
984  *
985  * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
986  */
987 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
988 {
989 	unsigned long parent_rate = 0;
990 	struct clk *parent;
991 
992 	if (!clk)
993 		return 0;
994 
995 	parent = clk->parent;
996 	if (parent)
997 		parent_rate = parent->rate;
998 
999 	if (clk->ops->determine_rate)
1000 		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
1001 						&parent);
1002 	else if (clk->ops->round_rate)
1003 		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
1004 	else if (clk->flags & CLK_SET_RATE_PARENT)
1005 		return __clk_round_rate(clk->parent, rate);
1006 	else
1007 		return clk->rate;
1008 }
1009 EXPORT_SYMBOL_GPL(__clk_round_rate);
1010 
1011 /**
1012  * clk_round_rate - round the given rate for a clk
1013  * @clk: the clk for which we are rounding a rate
1014  * @rate: the rate which is to be rounded
1015  *
1016  * Takes in a rate as input and rounds it to a rate that the clk can actually
1017  * use which is then returned.  If clk doesn't support the round_rate operation
1018  * then the rounded parent rate or the clk's own cached rate is returned.
1019  */
1020 long clk_round_rate(struct clk *clk, unsigned long rate)
1021 {
1022 	unsigned long ret;
1023 
1024 	clk_prepare_lock();
1025 	ret = __clk_round_rate(clk, rate);
1026 	clk_prepare_unlock();
1027 
1028 	return ret;
1029 }
1030 EXPORT_SYMBOL_GPL(clk_round_rate);
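
/*
 * A minimal sketch (hypothetical target rate): consumers commonly round
 * first to learn what the hardware can achieve, then set that rate:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */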
1031 
1032 /**
1033  * __clk_notify - call clk notifier chain
1034  * @clk: struct clk * that is changing rate
1035  * @msg: clk notifier type (see include/linux/clk.h)
1036  * @old_rate: old clk rate
1037  * @new_rate: new clk rate
1038  *
1039  * Triggers a notifier call chain on the clk rate-change notification
1040  * for 'clk'.  Passes a pointer to the struct clk and the previous
1041  * and current rates to the notifier callback.  Intended to be called by
1042  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1043  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1044  * a driver returns that.
1045  */
1046 static int __clk_notify(struct clk *clk, unsigned long msg,
1047 		unsigned long old_rate, unsigned long new_rate)
1048 {
1049 	struct clk_notifier *cn;
1050 	struct clk_notifier_data cnd;
1051 	int ret = NOTIFY_DONE;
1052 
1053 	cnd.clk = clk;
1054 	cnd.old_rate = old_rate;
1055 	cnd.new_rate = new_rate;
1056 
1057 	list_for_each_entry(cn, &clk_notifier_list, node) {
1058 		if (cn->clk == clk) {
1059 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1060 					&cnd);
1061 			break;
1062 		}
1063 	}
1064 
1065 	return ret;
1066 }
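
/*
 * A minimal subscriber sketch (hypothetical callback name): drivers hook
 * into this chain with clk_notifier_register() and receive the same
 * struct clk_notifier_data that is built above:
 *
 *	static int my_rate_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;	(vetoes the rate change)
 *		return NOTIFY_OK;
 *	}
 */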
1067 
1068 /**
1069  * __clk_recalc_accuracies
1070  * @clk: first clk in the subtree
1071  *
1072  * Walks the subtree of clks starting with clk and recalculates accuracies as
1073  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1074  * callback then it is assumed that the clock will take on the accuracy of its
1075  * parent.
1076  *
1077  * Caller must hold prepare_lock.
1078  */
1079 static void __clk_recalc_accuracies(struct clk *clk)
1080 {
1081 	unsigned long parent_accuracy = 0;
1082 	struct clk *child;
1083 
1084 	if (clk->parent)
1085 		parent_accuracy = clk->parent->accuracy;
1086 
1087 	if (clk->ops->recalc_accuracy)
1088 		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1089 							  parent_accuracy);
1090 	else
1091 		clk->accuracy = parent_accuracy;
1092 
1093 	hlist_for_each_entry(child, &clk->children, child_node)
1094 		__clk_recalc_accuracies(child);
1095 }
1096 
1097 /**
1098  * clk_get_accuracy - return the accuracy of clk
1099  * @clk: the clk whose accuracy is being returned
1100  *
1101  * Simply returns the cached accuracy of the clk, unless
1102  * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will
1103  * be issued.
1104  * If clk is NULL then returns 0.
1105  */
1106 long clk_get_accuracy(struct clk *clk)
1107 {
1108 	unsigned long accuracy;
1109 
1110 	clk_prepare_lock();
1111 	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1112 		__clk_recalc_accuracies(clk);
1113 
1114 	accuracy = __clk_get_accuracy(clk);
1115 	clk_prepare_unlock();
1116 
1117 	return accuracy;
1118 }
1119 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1120 
1121 static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
1122 {
1123 	if (clk->ops->recalc_rate)
1124 		return clk->ops->recalc_rate(clk->hw, parent_rate);
1125 	return parent_rate;
1126 }
1127 
1128 /**
1129  * __clk_recalc_rates
1130  * @clk: first clk in the subtree
1131  * @msg: notification type (see include/linux/clk.h)
1132  *
1133  * Walks the subtree of clks starting with clk and recalculates rates as it
1134  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1135  * it is assumed that the clock will take on the rate of its parent.
1136  *
1137  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1138  * if necessary.
1139  *
1140  * Caller must hold prepare_lock.
1141  */
1142 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
1143 {
1144 	unsigned long old_rate;
1145 	unsigned long parent_rate = 0;
1146 	struct clk *child;
1147 
1148 	old_rate = clk->rate;
1149 
1150 	if (clk->parent)
1151 		parent_rate = clk->parent->rate;
1152 
1153 	clk->rate = clk_recalc(clk, parent_rate);
1154 
1155 	/*
1156 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1157 	 * & ABORT_RATE_CHANGE notifiers
1158 	 */
1159 	if (clk->notifier_count && msg)
1160 		__clk_notify(clk, msg, old_rate, clk->rate);
1161 
1162 	hlist_for_each_entry(child, &clk->children, child_node)
1163 		__clk_recalc_rates(child, msg);
1164 }
1165 
1166 /**
1167  * clk_get_rate - return the rate of clk
1168  * @clk: the clk whose rate is being returned
1169  *
1170  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1171  * is set, which means a recalc_rate will be issued.
1172  * If clk is NULL then returns 0.
1173  */
1174 unsigned long clk_get_rate(struct clk *clk)
1175 {
1176 	unsigned long rate;
1177 
1178 	clk_prepare_lock();
1179 
1180 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
1181 		__clk_recalc_rates(clk, 0);
1182 
1183 	rate = __clk_get_rate(clk);
1184 	clk_prepare_unlock();
1185 
1186 	return rate;
1187 }
1188 EXPORT_SYMBOL_GPL(clk_get_rate);
1189 
1190 static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1191 {
1192 	int i;
1193 
1194 	if (!clk->parents) {
1195 		clk->parents = kcalloc(clk->num_parents,
1196 					sizeof(struct clk *), GFP_KERNEL);
1197 		if (!clk->parents)
1198 			return -ENOMEM;
1199 	}
1200 
1201 	/*
1202 	 * find index of new parent clock using cached parent ptrs,
1203 	 * or if not yet cached, use string name comparison and cache
1204 	 * them now to avoid future calls to __clk_lookup.
1205 	 */
1206 	for (i = 0; i < clk->num_parents; i++) {
1207 		if (clk->parents[i] == parent)
1208 			return i;
1209 
1210 		if (clk->parents[i])
1211 			continue;
1212 
1213 		if (!strcmp(clk->parent_names[i], parent->name)) {
1214 			clk->parents[i] = __clk_lookup(parent->name);
1215 			return i;
1216 		}
1217 	}
1218 
1219 	return -EINVAL;
1220 }
1221 
1222 static void clk_reparent(struct clk *clk, struct clk *new_parent)
1223 {
1224 	hlist_del(&clk->child_node);
1225 
1226 	if (new_parent) {
1227 		/* avoid duplicate POST_RATE_CHANGE notifications */
1228 		if (new_parent->new_child == clk)
1229 			new_parent->new_child = NULL;
1230 
1231 		hlist_add_head(&clk->child_node, &new_parent->children);
1232 	} else {
1233 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1234 	}
1235 
1236 	clk->parent = new_parent;
1237 }
1238 
1239 static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1240 {
1241 	unsigned long flags;
1242 	struct clk *old_parent = clk->parent;
1243 
1244 	/*
1245 	 * Migrate prepare state between parents and prevent race with
1246 	 * clk_enable().
1247 	 *
1248 	 * If the clock is not prepared, then a race with
1249 	 * clk_enable/disable() is impossible since we already have the
1250 	 * prepare lock (future calls to clk_enable() need to be preceded by
1251 	 * a clk_prepare()).
1252 	 *
1253 	 * If the clock is prepared, migrate the prepared state to the new
1254 	 * parent and also protect against a race with clk_enable() by
1255 	 * forcing the clock and the new parent on.  This ensures that all
1256 	 * future calls to clk_enable() are practically NOPs with respect to
1257 	 * hardware and software states.
1258 	 *
1259 	 * See also: Comment for clk_set_parent() below.
1260 	 */
1261 	if (clk->prepare_count) {
1262 		__clk_prepare(parent);
1263 		clk_enable(parent);
1264 		clk_enable(clk);
1265 	}
1266 
1267 	/* update the clk tree topology */
1268 	flags = clk_enable_lock();
1269 	clk_reparent(clk, parent);
1270 	clk_enable_unlock(flags);
1271 
1272 	return old_parent;
1273 }
1274 
1275 static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
1276 		struct clk *old_parent)
1277 {
1278 	/*
1279 	 * Finish the migration of prepare state and undo the changes done
1280 	 * for preventing a race with clk_enable().
1281 	 */
1282 	if (clk->prepare_count) {
1283 		clk_disable(clk);
1284 		clk_disable(old_parent);
1285 		__clk_unprepare(old_parent);
1286 	}
1287 
1288 	/* update debugfs with new clk tree topology */
1289 	clk_debug_reparent(clk, parent);
1290 }
1291 
1292 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1293 {
1294 	unsigned long flags;
1295 	int ret = 0;
1296 	struct clk *old_parent;
1297 
1298 	old_parent = __clk_set_parent_before(clk, parent);
1299 
1300 	/* change clock input source */
1301 	if (parent && clk->ops->set_parent)
1302 		ret = clk->ops->set_parent(clk->hw, p_index);
1303 
1304 	if (ret) {
1305 		flags = clk_enable_lock();
1306 		clk_reparent(clk, old_parent);
1307 		clk_enable_unlock(flags);
1308 
1309 		if (clk->prepare_count) {
1310 			clk_disable(clk);
1311 			clk_disable(parent);
1312 			__clk_unprepare(parent);
1313 		}
1314 		return ret;
1315 	}
1316 
1317 	__clk_set_parent_after(clk, parent, old_parent);
1318 
1319 	return 0;
1320 }
1321 
1322 /**
1323  * __clk_speculate_rates
1324  * @clk: first clk in the subtree
1325  * @parent_rate: the "future" rate of clk's parent
1326  *
1327  * Walks the subtree of clks starting with clk, speculating rates as it
1328  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1329  *
1330  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1331  * pre-rate change notifications and returns early if no clks in the
1332  * subtree have subscribed to the notifications.  Note that if a clk does not
1333  * implement the .recalc_rate callback then it is assumed that the clock will
1334  * take on the rate of its parent.
1335  *
1336  * Caller must hold prepare_lock.
1337  */
1338 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
1339 {
1340 	struct clk *child;
1341 	unsigned long new_rate;
1342 	int ret = NOTIFY_DONE;
1343 
1344 	new_rate = clk_recalc(clk, parent_rate);
1345 
1346 	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1347 	if (clk->notifier_count)
1348 		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
1349 
1350 	if (ret & NOTIFY_STOP_MASK) {
1351 		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1352 				__func__, clk->name, ret);
1353 		goto out;
1354 	}
1355 
1356 	hlist_for_each_entry(child, &clk->children, child_node) {
1357 		ret = __clk_speculate_rates(child, new_rate);
1358 		if (ret & NOTIFY_STOP_MASK)
1359 			break;
1360 	}
1361 
1362 out:
1363 	return ret;
1364 }
1365 
1366 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
1367 			     struct clk *new_parent, u8 p_index)
1368 {
1369 	struct clk *child;
1370 
1371 	clk->new_rate = new_rate;
1372 	clk->new_parent = new_parent;
1373 	clk->new_parent_index = p_index;
1374 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
1375 	clk->new_child = NULL;
1376 	if (new_parent && new_parent != clk->parent)
1377 		new_parent->new_child = clk;
1378 
1379 	hlist_for_each_entry(child, &clk->children, child_node) {
1380 		child->new_rate = clk_recalc(child, new_rate);
1381 		clk_calc_subtree(child, child->new_rate, NULL, 0);
1382 	}
1383 }
1384 
1385 /*
1386  * calculate the new rates returning the topmost clock that has to be
1387  * changed.
1388  */
1389 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1390 {
1391 	struct clk *top = clk;
1392 	struct clk *old_parent, *parent;
1393 	unsigned long best_parent_rate = 0;
1394 	unsigned long new_rate;
1395 	int p_index = 0;
1396 
1397 	/* sanity */
1398 	if (IS_ERR_OR_NULL(clk))
1399 		return NULL;
1400 
1401 	/* save parent rate, if it exists */
1402 	parent = old_parent = clk->parent;
1403 	if (parent)
1404 		best_parent_rate = parent->rate;
1405 
1406 	/* find the closest rate and parent clk/rate */
1407 	if (clk->ops->determine_rate) {
1408 		new_rate = clk->ops->determine_rate(clk->hw, rate,
1409 						    &best_parent_rate,
1410 						    &parent);
1411 	} else if (clk->ops->round_rate) {
1412 		new_rate = clk->ops->round_rate(clk->hw, rate,
1413 						&best_parent_rate);
1414 	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
1415 		/* pass-through clock without adjustable parent */
1416 		clk->new_rate = clk->rate;
1417 		return NULL;
1418 	} else {
1419 		/* pass-through clock with adjustable parent */
1420 		top = clk_calc_new_rates(parent, rate);
1421 		new_rate = parent->new_rate;
1422 		goto out;
1423 	}
1424 
1425 	/* some clocks must be gated to change parent */
1426 	if (parent != old_parent &&
1427 	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1428 		pr_debug("%s: %s not gated but wants to reparent\n",
1429 			 __func__, clk->name);
1430 		return NULL;
1431 	}
1432 
1433 	/* try finding the new parent index */
1434 	if (parent) {
1435 		p_index = clk_fetch_parent_index(clk, parent);
1436 		if (p_index < 0) {
1437 			pr_debug("%s: clk %s can not be parent of clk %s\n",
1438 				 __func__, parent->name, clk->name);
1439 			return NULL;
1440 		}
1441 	}
1442 
1443 	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
1444 	    best_parent_rate != parent->rate)
1445 		top = clk_calc_new_rates(parent, best_parent_rate);
1446 
1447 out:
1448 	clk_calc_subtree(clk, new_rate, parent, p_index);
1449 
1450 	return top;
1451 }
1452 
1453 /*
1454  * Notify about rate changes in a subtree. Always walk down the whole tree
1455  * so that in case of an error we can walk down the whole tree again and
1456  * abort the change.
1457  */
1458 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
1459 {
1460 	struct clk *child, *tmp_clk, *fail_clk = NULL;
1461 	int ret = NOTIFY_DONE;
1462 
1463 	if (clk->rate == clk->new_rate)
1464 		return NULL;
1465 
1466 	if (clk->notifier_count) {
1467 		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
1468 		if (ret & NOTIFY_STOP_MASK)
1469 			fail_clk = clk;
1470 	}
1471 
1472 	hlist_for_each_entry(child, &clk->children, child_node) {
1473 		/* Skip children who will be reparented to another clock */
1474 		if (child->new_parent && child->new_parent != clk)
1475 			continue;
1476 		tmp_clk = clk_propagate_rate_change(child, event);
1477 		if (tmp_clk)
1478 			fail_clk = tmp_clk;
1479 	}
1480 
1481 	/* handle the new child who might not be in clk->children yet */
1482 	if (clk->new_child) {
1483 		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
1484 		if (tmp_clk)
1485 			fail_clk = tmp_clk;
1486 	}
1487 
1488 	return fail_clk;
1489 }
1490 
1491 /*
1492  * walk down a subtree and set the new rates notifying the rate
1493  * change on the way
1494  */
1495 static void clk_change_rate(struct clk *clk)
1496 {
1497 	struct clk *child;
1498 	unsigned long old_rate;
1499 	unsigned long best_parent_rate = 0;
1500 	bool skip_set_rate = false;
1501 	struct clk *old_parent;
1502 
1503 	old_rate = clk->rate;
1504 
1505 	if (clk->new_parent)
1506 		best_parent_rate = clk->new_parent->rate;
1507 	else if (clk->parent)
1508 		best_parent_rate = clk->parent->rate;
1509 
1510 	if (clk->new_parent && clk->new_parent != clk->parent) {
1511 		old_parent = __clk_set_parent_before(clk, clk->new_parent);
1512 
1513 		if (clk->ops->set_rate_and_parent) {
1514 			skip_set_rate = true;
1515 			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
1516 					best_parent_rate,
1517 					clk->new_parent_index);
1518 		} else if (clk->ops->set_parent) {
1519 			clk->ops->set_parent(clk->hw, clk->new_parent_index);
1520 		}
1521 
1522 		__clk_set_parent_after(clk, clk->new_parent, old_parent);
1523 	}
1524 
1525 	if (!skip_set_rate && clk->ops->set_rate)
1526 		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1527 
1528 	clk->rate = clk_recalc(clk, best_parent_rate);
1529 
1530 	if (clk->notifier_count && old_rate != clk->rate)
1531 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1532 
1533 	hlist_for_each_entry(child, &clk->children, child_node) {
1534 		/* Skip children who will be reparented to another clock */
1535 		if (child->new_parent && child->new_parent != clk)
1536 			continue;
1537 		clk_change_rate(child);
1538 	}
1539 
1540 	/* handle the new child who might not be in clk->children yet */
1541 	if (clk->new_child)
1542 		clk_change_rate(clk->new_child);
1543 }
1544 
1545 /**
1546  * clk_set_rate - specify a new rate for clk
1547  * @clk: the clk whose rate is being changed
1548  * @rate: the new rate for clk
1549  *
1550  * In the simplest case clk_set_rate will only adjust the rate of clk.
1551  *
1552  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1553  * propagate up to clk's parent; whether or not this happens depends on the
1554  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
1555  * after calling .round_rate then upstream parent propagation is ignored.  If
1556  * *parent_rate comes back with a new rate for clk's parent then we propagate
1557  * up to clk's parent and set its rate.  Upward propagation will continue
1558  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1559  * .round_rate stops requesting changes to clk's parent_rate.
1560  *
1561  * Rate changes are accomplished via tree traversal that also recalculates the
1562  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1563  *
1564  * Returns 0 on success, a negative errno otherwise.
1565  */
1566 int clk_set_rate(struct clk *clk, unsigned long rate)
1567 {
1568 	struct clk *top, *fail_clk;
1569 	int ret = 0;
1570 
1571 	if (!clk)
1572 		return 0;
1573 
1574 	/* prevent racing with updates to the clock topology */
1575 	clk_prepare_lock();
1576 
1577 	/* bail early if nothing to do */
1578 	if (rate == clk_get_rate(clk))
1579 		goto out;
1580 
1581 	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1582 		ret = -EBUSY;
1583 		goto out;
1584 	}
1585 
1586 	/* calculate new rates and get the topmost changed clock */
1587 	top = clk_calc_new_rates(clk, rate);
1588 	if (!top) {
1589 		ret = -EINVAL;
1590 		goto out;
1591 	}
1592 
1593 	/* notify that we are about to change rates */
1594 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1595 	if (fail_clk) {
1596 		pr_debug("%s: failed to set %s rate\n", __func__,
1597 				fail_clk->name);
1598 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1599 		ret = -EBUSY;
1600 		goto out;
1601 	}
1602 
1603 	/* change the rates */
1604 	clk_change_rate(top);
1605 
1606 out:
1607 	clk_prepare_unlock();
1608 
1609 	return ret;
1610 }
1611 EXPORT_SYMBOL_GPL(clk_set_rate);
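
/*
 * A minimal provider-side sketch (hypothetical names): opting a clock into
 * the upward propagation described above is done with a flag at
 * registration time:
 *
 *	static const char *my_div_parents[] = { "my_pll" };
 *
 *	struct clk_init_data init = {
 *		.name		= "my_div",
 *		.ops		= &my_div_ops,
 *		.parent_names	= my_div_parents,
 *		.num_parents	= ARRAY_SIZE(my_div_parents),
 *		.flags		= CLK_SET_RATE_PARENT,
 *	};
 */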
1612 
1613 /**
1614  * clk_get_parent - return the parent of a clk
1615  * @clk: the clk whose parent gets returned
1616  *
1617  * Simply returns clk->parent.  Returns NULL if clk is NULL.
1618  */
1619 struct clk *clk_get_parent(struct clk *clk)
1620 {
1621 	struct clk *parent;
1622 
1623 	clk_prepare_lock();
1624 	parent = __clk_get_parent(clk);
1625 	clk_prepare_unlock();
1626 
1627 	return parent;
1628 }
1629 EXPORT_SYMBOL_GPL(clk_get_parent);
1630 
1631 /*
1632  * .get_parent is mandatory for clocks with multiple possible parents.  It is
1633  * optional for single-parent clocks.  Always call .get_parent if it is
1634  * available and WARN if it is missing for multi-parent clocks.
1635  *
1636  * For single-parent clocks without .get_parent, first check to see if the
1637  * .parents array exists, and if so use it to avoid an expensive tree
1638  * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
1639  */
1640 static struct clk *__clk_init_parent(struct clk *clk)
1641 {
1642 	struct clk *ret = NULL;
1643 	u8 index;
1644 
1645 	/* handle the trivial cases */
1646 
1647 	if (!clk->num_parents)
1648 		goto out;
1649 
1650 	if (clk->num_parents == 1) {
1651 		if (IS_ERR_OR_NULL(clk->parent))
1652 			clk->parent = __clk_lookup(clk->parent_names[0]);
1653 		ret = clk->parent;
1654 		goto out;
1655 	}
1656 
1657 	if (!clk->ops->get_parent) {
1658 		WARN(!clk->ops->get_parent,
1659 			"%s: multi-parent clocks must implement .get_parent\n",
1660 			__func__);
1661 		goto out;
1662 	}
1663 
1664 	/*
1665 	 * Do our best to cache parent clocks in clk->parents.  This prevents
1666 	 * unnecessary and expensive calls to __clk_lookup.  We don't set
1667 	 * clk->parent here; that is done by the calling function
1668 	 */
1669 
1670 	index = clk->ops->get_parent(clk->hw);
1671 
1672 	if (!clk->parents)
1673 		clk->parents =
1674 			kcalloc(clk->num_parents, sizeof(struct clk *),
1675 					GFP_KERNEL);
1676 
1677 	ret = clk_get_parent_by_index(clk, index);
1678 
1679 out:
1680 	return ret;
1681 }
1682 
1683 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1684 {
1685 	clk_reparent(clk, new_parent);
1686 	clk_debug_reparent(clk, new_parent);
1687 	__clk_recalc_accuracies(clk);
1688 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
1689 }
1690 
1691 /**
1692  * clk_set_parent - switch the parent of a mux clk
1693  * @clk: the mux clk whose input we are switching
1694  * @parent: the new input to clk
1695  *
1696  * Re-parent clk to use parent as its new input source.  If clk is in
1697  * prepared state, the clk will get enabled for the duration of this call. If
1698  * that's not acceptable for a specific clk (e.g. the consumer can't handle
1699  * that, the reparenting is glitchy in hardware, etc), use the
1700  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1701  *
1702  * After successfully changing clk's parent clk_set_parent will update the
1703  * clk topology, debugfs topology and propagate rate recalculation via
1704  * __clk_recalc_rates.
1705  *
1706  * Returns 0 on success, a negative errno otherwise.
1707  */
1708 int clk_set_parent(struct clk *clk, struct clk *parent)
1709 {
1710 	int ret = 0;
1711 	int p_index = 0;
1712 	unsigned long p_rate = 0;
1713 
1714 	if (!clk)
1715 		return 0;
1716 
1717 	/* verify ops for multi-parent clks */
1718 	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
1719 		return -ENOSYS;
1720 
1721 	/* prevent racing with updates to the clock topology */
1722 	clk_prepare_lock();
1723 
1724 	if (clk->parent == parent)
1725 		goto out;
1726 
1727 	/* check that we are allowed to re-parent if the clock is in use */
1728 	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1729 		ret = -EBUSY;
1730 		goto out;
1731 	}
1732 
1733 	/* try finding the new parent index */
1734 	if (parent) {
1735 		p_index = clk_fetch_parent_index(clk, parent);
1736 		p_rate = parent->rate;
1737 		if (p_index < 0) {
1738 			pr_debug("%s: clk %s can not be parent of clk %s\n",
1739 					__func__, parent->name, clk->name);
1740 			ret = p_index;
1741 			goto out;
1742 		}
1743 	}
1744 
1745 	/* propagate PRE_RATE_CHANGE notifications */
1746 	ret = __clk_speculate_rates(clk, p_rate);
1747 
1748 	/* abort if a driver objects */
1749 	if (ret & NOTIFY_STOP_MASK)
1750 		goto out;
1751 
1752 	/* do the re-parent */
1753 	ret = __clk_set_parent(clk, parent, p_index);
1754 
1755 	/* propagate rate and accuracy recalculation accordingly */
1756 	if (ret) {
1757 		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1758 	} else {
1759 		__clk_recalc_rates(clk, POST_RATE_CHANGE);
1760 		__clk_recalc_accuracies(clk);
1761 	}
1762 
1763 out:
1764 	clk_prepare_unlock();
1765 
1766 	return ret;
1767 }
1768 EXPORT_SYMBOL_GPL(clk_set_parent);
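
/*
 * A minimal consumer sketch (hypothetical clock names): re-route a mux to a
 * different upstream source:
 *
 *	struct clk *mux = clk_get(dev, "audio_mux");
 *	struct clk *pll = clk_get(dev, "audio_pll");
 *
 *	ret = clk_set_parent(mux, pll);
 */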
1769 
1770 /**
1771  * __clk_init - initialize the data structures in a struct clk
1772  * @dev:	device initializing this clk, placeholder for now
1773  * @clk:	clk being initialized
1774  *
1775  * Initializes the lists in struct clk, queries the hardware for the
1776  * parent and rate and sets them both.
1777  */
1778 int __clk_init(struct device *dev, struct clk *clk)
1779 {
1780 	int i, ret = 0;
1781 	struct clk *orphan;
1782 	struct hlist_node *tmp2;
1783 
1784 	if (!clk)
1785 		return -EINVAL;
1786 
1787 	clk_prepare_lock();
1788 
1789 	/* check to see if a clock with this name is already registered */
1790 	if (__clk_lookup(clk->name)) {
1791 		pr_debug("%s: clk %s already initialized\n",
1792 				__func__, clk->name);
1793 		ret = -EEXIST;
1794 		goto out;
1795 	}
1796 
1797 	/* check that clk_ops are sane.  See Documentation/clk.txt */
1798 	if (clk->ops->set_rate &&
1799 	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
1800 	      clk->ops->recalc_rate)) {
1801 		pr_warn("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
1802 				__func__, clk->name);
1803 		ret = -EINVAL;
1804 		goto out;
1805 	}
1806 
1807 	if (clk->ops->set_parent && !clk->ops->get_parent) {
1808 		pr_warn("%s: %s must implement .get_parent & .set_parent\n",
1809 				__func__, clk->name);
1810 		ret = -EINVAL;
1811 		goto out;
1812 	}
1813 
1814 	if (clk->ops->set_rate_and_parent &&
1815 			!(clk->ops->set_parent && clk->ops->set_rate)) {
1816 		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
1817 				__func__, clk->name);
1818 		ret = -EINVAL;
1819 		goto out;
1820 	}
1821 
1822 	/* throw a WARN if any entries in parent_names are NULL */
1823 	for (i = 0; i < clk->num_parents; i++)
1824 		WARN(!clk->parent_names[i],
1825 				"%s: invalid NULL in %s's .parent_names\n",
1826 				__func__, clk->name);
1827 
1828 	/*
1829 	 * Allocate an array of struct clk *'s to avoid unnecessary string
1830 	 * look-ups of clk's possible parents.  This can fail for clocks passed
1831 	 * in to clk_init during early boot; thus any access to clk->parents[]
1832 	 * must always check for a NULL pointer and try to populate it if
1833 	 * necessary.
1834 	 *
1835 	 * If clk->parents is not NULL we skip this entire block.  This allows
1836 	 * for clock drivers to statically initialize clk->parents.
1837 	 */
1838 	if (clk->num_parents > 1 && !clk->parents) {
1839 		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1840 					GFP_KERNEL);
1841 		/*
1842 		 * __clk_lookup returns NULL for parents that have not been
1843 		 * clk_init'd; thus any access to clk->parents[] must check
1844 		 * for a NULL pointer.  We can always perform lazy lookups for
1845 		 * missing parents later on.
1846 		 */
1847 		if (clk->parents)
1848 			for (i = 0; i < clk->num_parents; i++)
1849 				clk->parents[i] =
1850 					__clk_lookup(clk->parent_names[i]);
1851 	}
1852 
1853 	clk->parent = __clk_init_parent(clk);
1854 
1855 	/*
1856 	 * Populate clk->parent if parent has already been __clk_init'd.  If
1857 	 * parent has not yet been __clk_init'd then place clk in the orphan
1858 	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
1859 	 * clk list.
1860 	 *
1861 	 * Every time a new clk is clk_init'd then we walk the list of orphan
1862 	 * clocks and re-parent any that are children of the clock currently
1863 	 * being clk_init'd.
1864 	 */
1865 	if (clk->parent)
1866 		hlist_add_head(&clk->child_node,
1867 				&clk->parent->children);
1868 	else if (clk->flags & CLK_IS_ROOT)
1869 		hlist_add_head(&clk->child_node, &clk_root_list);
1870 	else
1871 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1872 
1873 	/*
1874 	 * Set clk's accuracy.  The preferred method is to use
1875 	 * .recalc_accuracy. For simple clocks and lazy developers the default
1876 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
1877 	 * parent (or is orphaned) then accuracy is set to zero (perfect
1878 	 * clock).
1879 	 */
1880 	if (clk->ops->recalc_accuracy)
1881 		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1882 					__clk_get_accuracy(clk->parent));
1883 	else if (clk->parent)
1884 		clk->accuracy = clk->parent->accuracy;
1885 	else
1886 		clk->accuracy = 0;
1887 
1888 	/*
1889 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
1890 	 * simple clocks and lazy developers the default fallback is to use the
1891 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
1892 	 * then rate is set to zero.
1893 	 */
1894 	if (clk->ops->recalc_rate)
1895 		clk->rate = clk->ops->recalc_rate(clk->hw,
1896 				__clk_get_rate(clk->parent));
1897 	else if (clk->parent)
1898 		clk->rate = clk->parent->rate;
1899 	else
1900 		clk->rate = 0;
1901 
1902 	clk_debug_register(clk);
1903 	/*
1904 	 * walk the list of orphan clocks and reparent any that are children of
1905 	 * this clock
1906 	 */
1907 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1908 		if (orphan->num_parents && orphan->ops->get_parent) {
1909 			i = orphan->ops->get_parent(orphan->hw);
1910 			if (!strcmp(clk->name, orphan->parent_names[i]))
1911 				__clk_reparent(orphan, clk);
1912 			continue;
1913 		}
1914 
1915 		for (i = 0; i < orphan->num_parents; i++)
1916 			if (!strcmp(clk->name, orphan->parent_names[i])) {
1917 				__clk_reparent(orphan, clk);
1918 				break;
1919 			}
1920 	}
1921 
1922 	/*
1923 	 * optional platform-specific magic
1924 	 *
1925 	 * The .init callback is not used by any of the basic clock types, but
1926 	 * exists for weird hardware that must perform initialization magic.
1927 	 * Please consider other ways of solving initialization problems before
1928 	 * using this callback, as its use is discouraged.
1929 	 */
1930 	if (clk->ops->init)
1931 		clk->ops->init(clk->hw);
1932 
1933 	kref_init(&clk->ref);
1934 out:
1935 	clk_prepare_unlock();
1936 
1937 	return ret;
1938 }
1939 
1940 /**
1941  * __clk_register - register a clock and return a cookie.
1942  *
1943  * Same as clk_register, except that the .clk field inside hw shall point to a
1944  * preallocated (generally statically allocated) struct clk. None of the fields
1945  * of the struct clk need to be initialized.
1946  *
1947  * The data pointed to by .init and .clk field shall NOT be marked as init
1948  * data.
1949  *
1950  * __clk_register is only exposed via clk-private.h and is intended for use with
1951  * very large numbers of clocks that need to be statically initialized.  It is
1952  * a layering violation to include clk-private.h from any code which implements
1953  * a clock's .ops; as such any statically initialized clock data MUST be in a
1954  * separate C file from the logic that implements its operations.  Returns 0
1955  * on success, otherwise an error code.
1956  */
1957 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1958 {
1959 	int ret;
1960 	struct clk *clk;
1961 
1962 	clk = hw->clk;
1963 	clk->name = hw->init->name;
1964 	clk->ops = hw->init->ops;
1965 	clk->hw = hw;
1966 	clk->flags = hw->init->flags;
1967 	clk->parent_names = hw->init->parent_names;
1968 	clk->num_parents = hw->init->num_parents;
1969 	if (dev && dev->driver)
1970 		clk->owner = dev->driver->owner;
1971 	else
1972 		clk->owner = NULL;
1973 
1974 	ret = __clk_init(dev, clk);
1975 	if (ret)
1976 		return ERR_PTR(ret);
1977 
1978 	return clk;
1979 }
1980 EXPORT_SYMBOL_GPL(__clk_register);
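
/*
 * Illustrative sketch only (not part of this file): registering a
 * statically allocated clock through __clk_register().  The identifiers
 * foo_clk, foo_ops and foo_parents are hypothetical; the clk_hw and
 * clk_init_data field names follow clk-provider.h.
 *
 *	static struct clk foo_clk;
 *	static const char *foo_parents[] = { "osc" };
 *	static struct clk_init_data foo_init = {
 *		.name = "foo",
 *		.ops = &foo_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *	static struct clk_hw foo_hw = {
 *		.clk = &foo_clk,
 *		.init = &foo_init,
 *	};
 *
 *	clk = __clk_register(NULL, &foo_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */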
1981 
1982 /**
1983  * clk_register - allocate a new clock, register it and return an opaque cookie
1984  * @dev: device that is registering this clock
1985  * @hw: link to hardware-specific clock data
1986  *
1987  * clk_register is the primary interface for populating the clock tree with new
1988  * clock nodes.  It returns a pointer to the newly allocated struct clk which
1989  * cannot be dereferenced by driver code but may be used in conjunction with the
1990  * rest of the clock API.  In the event of an error clk_register will return an
1991  * error code; drivers must test for an error code after calling clk_register.
1992  */
1993 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1994 {
1995 	int i, ret;
1996 	struct clk *clk;
1997 
1998 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1999 	if (!clk) {
2000 		pr_err("%s: could not allocate clk\n", __func__);
2001 		ret = -ENOMEM;
2002 		goto fail_out;
2003 	}
2004 
2005 	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
2006 	if (!clk->name) {
2007 		pr_err("%s: could not allocate clk->name\n", __func__);
2008 		ret = -ENOMEM;
2009 		goto fail_name;
2010 	}
2011 	clk->ops = hw->init->ops;
2012 	if (dev && dev->driver)
2013 		clk->owner = dev->driver->owner;
2014 	clk->hw = hw;
2015 	clk->flags = hw->init->flags;
2016 	clk->num_parents = hw->init->num_parents;
2017 	hw->clk = clk;
2018 
2019 	/* allocate local copy in case parent_names is __initdata */
2020 	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
2021 					GFP_KERNEL);
2022 
2023 	if (!clk->parent_names) {
2024 		pr_err("%s: could not allocate clk->parent_names\n", __func__);
2025 		ret = -ENOMEM;
2026 		goto fail_parent_names;
2027 	}
2028 
2030 	/* copy each string name in case parent_names is __initdata */
2031 	for (i = 0; i < clk->num_parents; i++) {
2032 		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
2033 						GFP_KERNEL);
2034 		if (!clk->parent_names[i]) {
2035 			pr_err("%s: could not copy parent_names\n", __func__);
2036 			ret = -ENOMEM;
2037 			goto fail_parent_names_copy;
2038 		}
2039 	}
2040 
2041 	ret = __clk_init(dev, clk);
2042 	if (!ret)
2043 		return clk;
2044 
2045 fail_parent_names_copy:
2046 	while (--i >= 0)
2047 		kfree(clk->parent_names[i]);
2048 	kfree(clk->parent_names);
2049 fail_parent_names:
2050 	kfree(clk->name);
2051 fail_name:
2052 	kfree(clk);
2053 fail_out:
2054 	return ERR_PTR(ret);
2055 }
2056 EXPORT_SYMBOL_GPL(clk_register);
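
/*
 * Illustrative sketch only (not part of this file): a driver registering
 * a clock through clk_register().  Because clk_register() copies the name
 * and parent name strings (see above), the init data may live on the
 * stack or in __initdata.  foo_ops, foo->hw and "foo_parent" are
 * hypothetical.
 *
 *	static const char *foo_parents[] = { "foo_parent" };
 *	struct clk_init_data init = {
 *		.name = "foo",
 *		.ops = &foo_ops,
 *		.parent_names = foo_parents,
 *		.num_parents = ARRAY_SIZE(foo_parents),
 *	};
 *	struct clk *clk;
 *
 *	foo->hw.init = &init;
 *	clk = clk_register(dev, &foo->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */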
2057 
2058 /*
2059  * Free memory allocated for a clock.
2060  * Caller must hold prepare_lock.
2061  */
2062 static void __clk_release(struct kref *ref)
2063 {
2064 	struct clk *clk = container_of(ref, struct clk, ref);
2065 	int i = clk->num_parents;
2066 
2067 	kfree(clk->parents);
2068 	while (--i >= 0)
2069 		kfree(clk->parent_names[i]);
2070 
2071 	kfree(clk->parent_names);
2072 	kfree(clk->name);
2073 	kfree(clk);
2074 }
2075 
2076 /*
2077  * Empty clk_ops for unregistered clocks. These are used temporarily
2078  * after clk_unregister() has been called on a clock, until the last
2079  * consumer calls clk_put() and the struct clk object is freed.
2080  */
2081 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2082 {
2083 	return -ENXIO;
2084 }
2085 
2086 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2087 {
2088 	WARN_ON_ONCE(1);
2089 }
2090 
2091 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2092 					unsigned long parent_rate)
2093 {
2094 	return -ENXIO;
2095 }
2096 
2097 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2098 {
2099 	return -ENXIO;
2100 }
2101 
2102 static const struct clk_ops clk_nodrv_ops = {
2103 	.enable		= clk_nodrv_prepare_enable,
2104 	.disable	= clk_nodrv_disable_unprepare,
2105 	.prepare	= clk_nodrv_prepare_enable,
2106 	.unprepare	= clk_nodrv_disable_unprepare,
2107 	.set_rate	= clk_nodrv_set_rate,
2108 	.set_parent	= clk_nodrv_set_parent,
2109 };
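
/*
 * Illustrative sketch only: a consumer that still holds a struct clk
 * pointer after clk_unregister() hits these stubs instead of the old
 * driver's callbacks, e.g.:
 *
 *	ret = clk_prepare(clk);		// now returns -ENXIO
 */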
2110 
2111 /**
2112  * clk_unregister - unregister a currently registered clock
2113  * @clk: clock to unregister
2114  */
2115 void clk_unregister(struct clk *clk)
2116 {
2117 	unsigned long flags;
2118 
2119 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2120 		return;
2121 
2122 	clk_prepare_lock();
2123 
2124 	if (clk->ops == &clk_nodrv_ops) {
2125 		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
2126 		goto out;
2127 	}
2128 	/*
2129 	 * Assign empty clock ops for consumers that might still hold
2130 	 * a reference to this clock.
2131 	 */
2132 	flags = clk_enable_lock();
2133 	clk->ops = &clk_nodrv_ops;
2134 	clk_enable_unlock(flags);
2135 
2136 	if (!hlist_empty(&clk->children)) {
2137 		struct clk *child;
2138 		struct hlist_node *t;
2139 
2140 		/* Reparent all children to the orphan list. */
2141 		hlist_for_each_entry_safe(child, t, &clk->children, child_node)
2142 			clk_set_parent(child, NULL);
2143 	}
2144 
2145 	clk_debug_unregister(clk);
2146 
2147 	hlist_del_init(&clk->child_node);
2148 
2149 	if (clk->prepare_count)
2150 		pr_warn("%s: unregistering prepared clock: %s\n",
2151 					__func__, clk->name);
2152 
2153 	kref_put(&clk->ref, __clk_release);
2154 out:
2155 	clk_prepare_unlock();
2156 }
2157 EXPORT_SYMBOL_GPL(clk_unregister);
2158 
2159 static void devm_clk_release(struct device *dev, void *res)
2160 {
2161 	clk_unregister(*(struct clk **)res);
2162 }
2163 
2164 /**
2165  * devm_clk_register - resource managed clk_register()
2166  * @dev: device that is registering this clock
2167  * @hw: link to hardware-specific clock data
2168  *
2169  * Managed clk_register(). Clocks returned from this function are
2170  * automatically clk_unregister()ed on driver detach. See clk_register() for
2171  * more information.
2172  */
2173 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2174 {
2175 	struct clk *clk;
2176 	struct clk **clkp;
2177 
2178 	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2179 	if (!clkp)
2180 		return ERR_PTR(-ENOMEM);
2181 
2182 	clk = clk_register(dev, hw);
2183 	if (!IS_ERR(clk)) {
2184 		*clkp = clk;
2185 		devres_add(dev, clkp);
2186 	} else {
2187 		devres_free(clkp);
2188 	}
2189 
2190 	return clk;
2191 }
2192 EXPORT_SYMBOL_GPL(devm_clk_register);
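
/*
 * Illustrative sketch only (not part of this file): typical use from a
 * driver probe routine; foo_hw is a hypothetical, already initialized
 * struct clk_hw.  No clk_unregister() is needed on the error or remove
 * paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		clk = devm_clk_register(&pdev->dev, &foo_hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *		...
 *	}
 */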
2193 
2194 static int devm_clk_match(struct device *dev, void *res, void *data)
2195 {
2196 	struct clk **c = res;
2197 	if (WARN_ON(!c || !*c))
2198 		return 0;
2199 	return *c == data;
2200 }
2201 
2202 /**
2203  * devm_clk_unregister - resource managed clk_unregister()
2204  * @dev: device the clock was registered with
 * @clk: clock to unregister
2205  *
2206  * Deallocate a clock allocated with devm_clk_register(). Normally
2207  * this function will not need to be called and the resource management
2208  * code will ensure that the resource is freed.
2209  */
2210 void devm_clk_unregister(struct device *dev, struct clk *clk)
2211 {
2212 	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2213 }
2214 EXPORT_SYMBOL_GPL(devm_clk_unregister);
2215 
2216 /*
2217  * clkdev helpers
2218  */
2219 int __clk_get(struct clk *clk)
2220 {
2221 	if (clk) {
2222 		if (!try_module_get(clk->owner))
2223 			return 0;
2224 
2225 		kref_get(&clk->ref);
2226 	}
2227 	return 1;
2228 }
2229 
2230 void __clk_put(struct clk *clk)
2231 {
2232 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2233 		return;
2234 
2235 	clk_prepare_lock();
2236 	kref_put(&clk->ref, __clk_release);
2237 	clk_prepare_unlock();
2238 
2239 	module_put(clk->owner);
2240 }
2241 
2242 /***        clk rate change notifiers        ***/
2243 
2244 /**
2245  * clk_notifier_register - add a clk rate change notifier
2246  * @clk: struct clk * to watch
2247  * @nb: struct notifier_block * with callback info
2248  *
2249  * Request notification when clk's rate changes.  This uses an SRCU
2250  * notifier because we want it to block and notifier unregistrations are
2251  * uncommon.  The callbacks associated with the notifier must not
2252  * re-enter into the clk framework by calling any top-level clk APIs;
2253  * doing so causes a nested acquisition of the prepare_lock mutex.
2254  *
2255  * In all notification cases (pre, post and abort rate change) the
2256  * original clock rate is passed to the callback via struct
2257  * clk_notifier_data.old_rate and the new frequency is passed via struct
2258  * clk_notifier_data.new_rate.
2259  *
2260  * clk_notifier_register() must be called from non-atomic context.
2261  * Returns -EINVAL if called with null arguments, -ENOMEM upon
2262  * allocation failure; otherwise, passes along the return value of
2263  * srcu_notifier_chain_register().
2264  */
2265 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2266 {
2267 	struct clk_notifier *cn;
2268 	int ret = -ENOMEM;
2269 
2270 	if (!clk || !nb)
2271 		return -EINVAL;
2272 
2273 	clk_prepare_lock();
2274 
2275 	/* search the list of notifiers for this clk */
2276 	list_for_each_entry(cn, &clk_notifier_list, node)
2277 		if (cn->clk == clk)
2278 			break;
2279 
2280 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
2281 	if (cn->clk != clk) {
2282 		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2283 		if (!cn)
2284 			goto out;
2285 
2286 		cn->clk = clk;
2287 		srcu_init_notifier_head(&cn->notifier_head);
2288 
2289 		list_add(&cn->node, &clk_notifier_list);
2290 	}
2291 
2292 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2293 
2294 	clk->notifier_count++;
2295 
2296 out:
2297 	clk_prepare_unlock();
2298 
2299 	return ret;
2300 }
2301 EXPORT_SYMBOL_GPL(clk_notifier_register);
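
/*
 * Illustrative sketch only (not part of this file): a minimal rate
 * change notifier.  FOO_MAX_RATE is hypothetical; returning NOTIFY_BAD
 * from a PRE_RATE_CHANGE notification vetoes the rate change.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_nb);
 */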
2302 
2303 /**
2304  * clk_notifier_unregister - remove a clk rate change notifier
2305  * @clk: struct clk *
2306  * @nb: struct notifier_block * with callback info
2307  *
2308  * Request no further notification for changes to 'clk' and free the
2309  * memory allocated in clk_notifier_register().
2310  *
2311  * Returns -EINVAL if called with null arguments; otherwise, passes
2312  * along the return value of srcu_notifier_chain_unregister().
2313  */
2314 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2315 {
2316 	struct clk_notifier *cn = NULL;
2317 	int ret = -EINVAL;
2318 
2319 	if (!clk || !nb)
2320 		return -EINVAL;
2321 
2322 	clk_prepare_lock();
2323 
2324 	list_for_each_entry(cn, &clk_notifier_list, node)
2325 		if (cn->clk == clk)
2326 			break;
2327 
2328 	if (cn->clk == clk) {
2329 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2330 
2331 		clk->notifier_count--;
2332 
2333 		/* XXX the notifier code should handle this better */
2334 		if (!cn->notifier_head.head) {
2335 			srcu_cleanup_notifier_head(&cn->notifier_head);
2336 			list_del(&cn->node);
2337 			kfree(cn);
2338 		}
2339 
2340 	} else {
2341 		ret = -ENOENT;
2342 	}
2343 
2344 	clk_prepare_unlock();
2345 
2346 	return ret;
2347 }
2348 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
2349 
2350 #ifdef CONFIG_OF
2351 /**
2352  * struct of_clk_provider - Clock provider registration structure
2353  * @link: Entry in global list of clock providers
2354  * @node: Pointer to device tree node of clock provider
2355  * @get: Get clock callback.  Returns NULL or a struct clk for the
2356  *       given clock specifier
2357  * @data: context pointer to be passed into @get callback
2358  */
2359 struct of_clk_provider {
2360 	struct list_head link;
2361 
2362 	struct device_node *node;
2363 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2364 	void *data;
2365 };
2366 
2367 static const struct of_device_id __clk_of_table_sentinel
2368 	__used __section(__clk_of_table_end);
2369 
2370 static LIST_HEAD(of_clk_providers);
2371 static DEFINE_MUTEX(of_clk_mutex);
2372 
2373 /* of_clk_provider list locking helpers */
2374 void of_clk_lock(void)
2375 {
2376 	mutex_lock(&of_clk_mutex);
2377 }
2378 
2379 void of_clk_unlock(void)
2380 {
2381 	mutex_unlock(&of_clk_mutex);
2382 }
2383 
2384 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2385 				     void *data)
2386 {
2387 	return data;
2388 }
2389 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2390 
2391 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2392 {
2393 	struct clk_onecell_data *clk_data = data;
2394 	unsigned int idx = clkspec->args[0];
2395 
2396 	if (idx >= clk_data->clk_num) {
2397 		pr_err("%s: invalid clock index %d\n", __func__, idx);
2398 		return ERR_PTR(-EINVAL);
2399 	}
2400 
2401 	return clk_data->clks[idx];
2402 }
2403 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2404 
2405 /**
2406  * of_clk_add_provider() - Register a clock provider for a node
2407  * @np: Device node pointer associated with clock provider
2408  * @clk_src_get: callback for decoding clock
2409  * @data: context pointer for @clk_src_get callback.
2410  */
2411 int of_clk_add_provider(struct device_node *np,
2412 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2413 						   void *data),
2414 			void *data)
2415 {
2416 	struct of_clk_provider *cp;
2417 
2418 	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2419 	if (!cp)
2420 		return -ENOMEM;
2421 
2422 	cp->node = of_node_get(np);
2423 	cp->data = data;
2424 	cp->get = clk_src_get;
2425 
2426 	mutex_lock(&of_clk_mutex);
2427 	list_add(&cp->link, &of_clk_providers);
2428 	mutex_unlock(&of_clk_mutex);
2429 	pr_debug("Added clock from %s\n", np->full_name);
2430 
2431 	return 0;
2432 }
2433 EXPORT_SYMBOL_GPL(of_clk_add_provider);
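
/*
 * Illustrative sketch only (not part of this file): exporting an array of
 * clocks with a one-cell specifier via of_clk_src_onecell_get(), so that
 * "clocks = <&foo 3>;" in a consumer node resolves to foo_clks[3].
 * foo_clks and FOO_NR_CLKS are hypothetical.
 *
 *	static struct clk *foo_clks[FOO_NR_CLKS];
 *	static struct clk_onecell_data foo_clk_data = {
 *		.clks = foo_clks,
 *		.clk_num = ARRAY_SIZE(foo_clks),
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 */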
2434 
2435 /**
2436  * of_clk_del_provider() - Remove a previously registered clock provider
2437  * @np: Device node pointer associated with clock provider
2438  */
2439 void of_clk_del_provider(struct device_node *np)
2440 {
2441 	struct of_clk_provider *cp;
2442 
2443 	mutex_lock(&of_clk_mutex);
2444 	list_for_each_entry(cp, &of_clk_providers, link) {
2445 		if (cp->node == np) {
2446 			list_del(&cp->link);
2447 			of_node_put(cp->node);
2448 			kfree(cp);
2449 			break;
2450 		}
2451 	}
2452 	mutex_unlock(&of_clk_mutex);
2453 }
2454 EXPORT_SYMBOL_GPL(of_clk_del_provider);
2455 
2456 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2457 {
2458 	struct of_clk_provider *provider;
2459 	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2460 
2461 	/* Check if we have such a provider in our list */
2462 	list_for_each_entry(provider, &of_clk_providers, link) {
2463 		if (provider->node == clkspec->np)
2464 			clk = provider->get(clkspec, provider->data);
2465 		if (!IS_ERR(clk))
2466 			break;
2467 	}
2468 
2469 	return clk;
2470 }
2471 
2472 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2473 {
2474 	struct clk *clk;
2475 
2476 	mutex_lock(&of_clk_mutex);
2477 	clk = __of_clk_get_from_provider(clkspec);
2478 	mutex_unlock(&of_clk_mutex);
2479 
2480 	return clk;
2481 }
2482 
2483 int of_clk_get_parent_count(struct device_node *np)
2484 {
2485 	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
2486 }
2487 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2488 
2489 const char *of_clk_get_parent_name(struct device_node *np, int index)
2490 {
2491 	struct of_phandle_args clkspec;
2492 	struct property *prop;
2493 	const char *clk_name;
2494 	const __be32 *vp;
2495 	u32 pv;
2496 	int rc;
2497 	int count;
2498 
2499 	if (index < 0)
2500 		return NULL;
2501 
2502 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2503 					&clkspec);
2504 	if (rc)
2505 		return NULL;
2506 
2507 	index = clkspec.args_count ? clkspec.args[0] : 0;
2508 	count = 0;
2509 
2510 	/* if there is an indices property, use it to translate the index
2511 	 * specified into an array offset for the clock-output-names property.
2512 	 */
2513 	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
2514 		if (index == pv) {
2515 			index = count;
2516 			break;
2517 		}
2518 		count++;
2519 	}
2520 
2521 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
2522 					  index,
2523 					  &clk_name) < 0)
2524 		clk_name = clkspec.np->name;
2525 
2526 	of_node_put(clkspec.np);
2527 	return clk_name;
2528 }
2529 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
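
/*
 * Illustrative sketch only (not part of this file): how the clock-indices
 * lookup above maps a specifier index to a name.  Given a hypothetical
 * provider node containing
 *
 *	clock-indices = <1>, <3>;
 *	clock-output-names = "clka", "clkb";
 *
 * a specifier index of 3 is translated to array offset 1 and "clkb" is
 * returned.  Without a clock-indices property the specifier index is used
 * as the array offset directly.
 */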
2530 
2531 struct clock_provider {
2532 	of_clk_init_cb_t clk_init_cb;
2533 	struct device_node *np;
2534 	struct list_head node;
2535 };
2536 
2537 static LIST_HEAD(clk_provider_list);
2538 
2539 /*
2540  * This function walks the parent clocks of a device node.  A parent
2541  * is ready once its provider has been initialized.  Return 1 if all
2542  * parents are ready (or there are none), 0 otherwise.
2543  */
2544 static int parent_ready(struct device_node *np)
2545 {
2546 	int i = 0;
2547 
2548 	while (true) {
2549 		struct clk *clk = of_clk_get(np, i);
2550 
2551 		/* this parent is ready, so we can check the next one */
2552 		if (!IS_ERR(clk)) {
2553 			clk_put(clk);
2554 			i++;
2555 			continue;
2556 		}
2557 
2558 		/* at least one parent is not ready, we exit now */
2559 		if (PTR_ERR(clk) == -EPROBE_DEFER)
2560 			return 0;
2561 
2562 		/*
2563 		 * Here we assume that the device tree is written
2564 		 * correctly, so any other error means there are no
2565 		 * more parents.  Since we did not exit above, all the
2566 		 * parents seen so far are ready.  A clock with no
2567 		 * parents at all has nothing to wait for, so it is
2568 		 * considered ready as well.
2569 		 */
2570 		return 1;
2571 	}
2572 }
2573 
2574 /**
2575  * of_clk_init() - Scan and init clock providers from the DT
2576  * @matches: array of compatible values and init functions for providers.
2577  *
2578  * This function scans the device tree for matching clock providers
2579  * and calls their initialization functions. It also does it by trying
2580  * and calls their initialization functions, trying to follow the
2581  * dependencies between providers where possible.
2582 void __init of_clk_init(const struct of_device_id *matches)
2583 {
2584 	const struct of_device_id *match;
2585 	struct device_node *np;
2586 	struct clock_provider *clk_provider, *next;
2587 	bool is_init_done;
2588 	bool force = false;
2589 
2590 	if (!matches)
2591 		matches = &__clk_of_table;
2592 
2593 	/* First prepare the list of the clocks providers */
2594 	for_each_matching_node_and_match(np, matches, &match) {
2595 		struct clock_provider *parent =
2596 			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
2597 
2598 		parent->clk_init_cb = match->data;
2599 		parent->np = np;
2600 		list_add_tail(&parent->node, &clk_provider_list);
2601 	}
2602 
2603 	while (!list_empty(&clk_provider_list)) {
2604 		is_init_done = false;
2605 		list_for_each_entry_safe(clk_provider, next,
2606 					&clk_provider_list, node) {
2607 			if (force || parent_ready(clk_provider->np)) {
2608 				clk_provider->clk_init_cb(clk_provider->np);
2609 				list_del(&clk_provider->node);
2610 				kfree(clk_provider);
2611 				is_init_done = true;
2612 			}
2613 		}
2614 
2615 		/*
2616 		 * We didn't manage to initialize any of the remaining
2617 		 * providers during the last pass, so now initialize
2618 		 * all of them unconditionally, in case a clock parent
2619 		 * was not mandatory after all.
2620 		 */
2621 		if (!is_init_done)
2622 			force = true;
2623 
2624 	}
2625 }
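
/*
 * Illustrative sketch only (not part of this file): providers enter the
 * __clk_of_table scanned above via CLK_OF_DECLARE() from clk-provider.h,
 * and the platform calls of_clk_init(NULL) early during boot.
 * foo_clk_setup and "vendor,foo-clock" are hypothetical.
 *
 *	static void __init foo_clk_setup(struct device_node *np)
 *	{
 *		// register clocks, then of_clk_add_provider(np, ...)
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock", foo_clk_setup);
 *
 *	// from the machine's clock/time init code:
 *	of_clk_init(NULL);
 */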
2626 #endif
2627