xref: /openbmc/linux/drivers/clk/clk.c (revision afb46f79)
1 /*
2  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * Standard functionality for the common clock API.  See Documentation/clk.txt
10  */
11 
12 #include <linux/clk-private.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/spinlock.h>
16 #include <linux/err.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/device.h>
21 #include <linux/init.h>
22 #include <linux/sched.h>
23 
24 #include "clk.h"
25 
26 static DEFINE_SPINLOCK(enable_lock);
27 static DEFINE_MUTEX(prepare_lock);
28 
29 static struct task_struct *prepare_owner;
30 static struct task_struct *enable_owner;
31 
32 static int prepare_refcnt;
33 static int enable_refcnt;
34 
35 static HLIST_HEAD(clk_root_list);
36 static HLIST_HEAD(clk_orphan_list);
37 static LIST_HEAD(clk_notifier_list);
38 
39 /***           locking             ***/
40 static void clk_prepare_lock(void)
41 {
42 	if (!mutex_trylock(&prepare_lock)) {
43 		if (prepare_owner == current) {
44 			prepare_refcnt++;
45 			return;
46 		}
47 		mutex_lock(&prepare_lock);
48 	}
49 	WARN_ON_ONCE(prepare_owner != NULL);
50 	WARN_ON_ONCE(prepare_refcnt != 0);
51 	prepare_owner = current;
52 	prepare_refcnt = 1;
53 }
54 
55 static void clk_prepare_unlock(void)
56 {
57 	WARN_ON_ONCE(prepare_owner != current);
58 	WARN_ON_ONCE(prepare_refcnt == 0);
59 
60 	if (--prepare_refcnt)
61 		return;
62 	prepare_owner = NULL;
63 	mutex_unlock(&prepare_lock);
64 }
65 
66 static unsigned long clk_enable_lock(void)
67 {
68 	unsigned long flags;
69 
70 	if (!spin_trylock_irqsave(&enable_lock, flags)) {
71 		if (enable_owner == current) {
72 			enable_refcnt++;
73 			return flags;
74 		}
75 		spin_lock_irqsave(&enable_lock, flags);
76 	}
77 	WARN_ON_ONCE(enable_owner != NULL);
78 	WARN_ON_ONCE(enable_refcnt != 0);
79 	enable_owner = current;
80 	enable_refcnt = 1;
81 	return flags;
82 }
83 
84 static void clk_enable_unlock(unsigned long flags)
85 {
86 	WARN_ON_ONCE(enable_owner != current);
87 	WARN_ON_ONCE(enable_refcnt == 0);
88 
89 	if (--enable_refcnt)
90 		return;
91 	enable_owner = NULL;
92 	spin_unlock_irqrestore(&enable_lock, flags);
93 }
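
/*
 * Example sketch (hypothetical provider code, not part of this file): the
 * trylock/owner/refcnt scheme above makes both locks reentrant for the task
 * that already holds them, so a clk_ops callback may safely call back into
 * the framework while the framework holds prepare_lock:
 *
 *	static int foo_prepare(struct clk_hw *hw)
 *	{
 *		// clk_prepare() already holds prepare_lock here, so this
 *		// nested call only bumps prepare_refcnt instead of
 *		// deadlocking:
 *		unsigned long rate = clk_get_rate(hw->clk);
 *
 *		return foo_program_pll(hw, rate);
 *	}
 *
 * foo_prepare() and foo_program_pll() are illustrative names only.
 */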
94 
95 /***        debugfs support        ***/
96 
97 #ifdef CONFIG_DEBUG_FS
98 #include <linux/debugfs.h>
99 
100 static struct dentry *rootdir;
101 static struct dentry *orphandir;
102 static int inited = 0;
103 
104 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
105 {
106 	if (!c)
107 		return;
108 
109 	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu %-11lu",
110 		   level * 3 + 1, "",
111 		   30 - level * 3, c->name,
112 		   c->enable_count, c->prepare_count, clk_get_rate(c),
113 		   clk_get_accuracy(c));
114 	seq_printf(s, "\n");
115 }
116 
117 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
118 				     int level)
119 {
120 	struct clk *child;
121 
122 	if (!c)
123 		return;
124 
125 	clk_summary_show_one(s, c, level);
126 
127 	hlist_for_each_entry(child, &c->children, child_node)
128 		clk_summary_show_subtree(s, child, level + 1);
129 }
130 
131 static int clk_summary_show(struct seq_file *s, void *data)
132 {
133 	struct clk *c;
134 
135 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate        accuracy\n");
136 	seq_printf(s, "---------------------------------------------------------------------------------\n");
137 
138 	clk_prepare_lock();
139 
140 	hlist_for_each_entry(c, &clk_root_list, child_node)
141 		clk_summary_show_subtree(s, c, 0);
142 
143 	hlist_for_each_entry(c, &clk_orphan_list, child_node)
144 		clk_summary_show_subtree(s, c, 0);
145 
146 	clk_prepare_unlock();
147 
148 	return 0;
149 }
150 
151 
152 static int clk_summary_open(struct inode *inode, struct file *file)
153 {
154 	return single_open(file, clk_summary_show, inode->i_private);
155 }
156 
157 static const struct file_operations clk_summary_fops = {
158 	.open		= clk_summary_open,
159 	.read		= seq_read,
160 	.llseek		= seq_lseek,
161 	.release	= single_release,
162 };
163 
164 static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
165 {
166 	if (!c)
167 		return;
168 
169 	seq_printf(s, "\"%s\": { ", c->name);
170 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
171 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
172 	seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
173 	seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
174 }
175 
176 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
177 {
178 	struct clk *child;
179 
180 	if (!c)
181 		return;
182 
183 	clk_dump_one(s, c, level);
184 
185 	hlist_for_each_entry(child, &c->children, child_node) {
186 		seq_printf(s, ",");
187 		clk_dump_subtree(s, child, level + 1);
188 	}
189 
190 	seq_printf(s, "}");
191 }
192 
193 static int clk_dump(struct seq_file *s, void *data)
194 {
195 	struct clk *c;
196 	bool first_node = true;
197 
198 	seq_printf(s, "{");
199 
200 	clk_prepare_lock();
201 
202 	hlist_for_each_entry(c, &clk_root_list, child_node) {
203 		if (!first_node)
204 			seq_printf(s, ",");
205 		first_node = false;
206 		clk_dump_subtree(s, c, 0);
207 	}
208 
209 	hlist_for_each_entry(c, &clk_orphan_list, child_node) {
210 		seq_printf(s, ",");
211 		clk_dump_subtree(s, c, 0);
212 	}
213 
214 	clk_prepare_unlock();
215 
216 	seq_printf(s, "}");
217 	return 0;
218 }
219 
220 
221 static int clk_dump_open(struct inode *inode, struct file *file)
222 {
223 	return single_open(file, clk_dump, inode->i_private);
224 }
225 
226 static const struct file_operations clk_dump_fops = {
227 	.open		= clk_dump_open,
228 	.read		= seq_read,
229 	.llseek		= seq_lseek,
230 	.release	= single_release,
231 };
232 
233 /* caller must hold prepare_lock */
234 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
235 {
236 	struct dentry *d;
237 	int ret = -ENOMEM;
238 
239 	if (!clk || !pdentry) {
240 		ret = -EINVAL;
241 		goto out;
242 	}
243 
244 	d = debugfs_create_dir(clk->name, pdentry);
245 	if (!d)
246 		goto out;
247 
248 	clk->dentry = d;
249 
250 	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
251 			(u32 *)&clk->rate);
252 	if (!d)
253 		goto err_out;
254 
255 	d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry,
256 			(u32 *)&clk->accuracy);
257 	if (!d)
258 		goto err_out;
259 
260 	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
261 			(u32 *)&clk->flags);
262 	if (!d)
263 		goto err_out;
264 
265 	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
266 			(u32 *)&clk->prepare_count);
267 	if (!d)
268 		goto err_out;
269 
270 	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
271 			(u32 *)&clk->enable_count);
272 	if (!d)
273 		goto err_out;
274 
275 	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
276 			(u32 *)&clk->notifier_count);
277 	if (!d)
278 		goto err_out;
279 
280 	if (clk->ops->debug_init)
281 		if (clk->ops->debug_init(clk->hw, clk->dentry))
282 			goto err_out;
283 
284 	ret = 0;
285 	goto out;
286 
287 err_out:
288 	debugfs_remove_recursive(clk->dentry);
289 	clk->dentry = NULL;
290 out:
291 	return ret;
292 }
293 
294 /* caller must hold prepare_lock */
295 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
296 {
297 	struct clk *child;
298 	int ret = -EINVAL;
299 
300 	if (!clk || !pdentry)
301 		goto out;
302 
303 	ret = clk_debug_create_one(clk, pdentry);
304 
305 	if (ret)
306 		goto out;
307 
308 	hlist_for_each_entry(child, &clk->children, child_node)
309 		clk_debug_create_subtree(child, clk->dentry);
310 
311 	ret = 0;
312 out:
313 	return ret;
314 }
315 
316 /**
317  * clk_debug_register - add a clk node to the debugfs clk tree
318  * @clk: the clk being added to the debugfs clk tree
319  *
320  * Dynamically adds a clk to the debugfs clk tree if debugfs has been
321  * initialized.  Otherwise it bails out early since the debugfs clk tree
322  * will be created lazily by clk_debug_init as part of a late_initcall.
323  *
324  * Caller must hold prepare_lock.  Only clk_init calls this function (so
325  * far), so this requirement is already satisfied.
326  */
327 static int clk_debug_register(struct clk *clk)
328 {
329 	struct clk *parent;
330 	struct dentry *pdentry;
331 	int ret = 0;
332 
333 	if (!inited)
334 		goto out;
335 
336 	parent = clk->parent;
337 
338 	/*
339 	 * Check to see if a clk is a root clk.  Also check that it is
340 	 * safe to add this clk to debugfs
341 	 */
342 	if (!parent)
343 		if (clk->flags & CLK_IS_ROOT)
344 			pdentry = rootdir;
345 		else
346 			pdentry = orphandir;
347 	else
348 		if (parent->dentry)
349 			pdentry = parent->dentry;
350 		else
351 			goto out;
352 
353 	ret = clk_debug_create_subtree(clk, pdentry);
354 
355 out:
356 	return ret;
357 }
358 
359 /**
360  * clk_debug_unregister - remove a clk node from the debugfs clk tree
361  * @clk: the clk being removed from the debugfs clk tree
362  *
363  * Dynamically removes a clk and all of its child clk nodes from the
364  * debugfs clk tree if clk->dentry points to debugfs created by
365  * clk_debug_register in __clk_init.
366  *
367  * Caller must hold prepare_lock.
368  */
369 static void clk_debug_unregister(struct clk *clk)
370 {
371 	debugfs_remove_recursive(clk->dentry);
372 }
373 
374 /**
375  * clk_debug_reparent - reparent clk node in the debugfs clk tree
376  * @clk: the clk being reparented
377  * @new_parent: the new clk parent, may be NULL
378  *
379  * Rename clk entry in the debugfs clk tree if debugfs has been
380  * initialized.  Otherwise it bails out early since the debugfs clk tree
381  * will be created lazily by clk_debug_init as part of a late_initcall.
382  *
383  * Caller must hold prepare_lock.
384  */
385 static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
386 {
387 	struct dentry *d;
388 	struct dentry *new_parent_d;
389 
390 	if (!inited)
391 		return;
392 
393 	if (new_parent)
394 		new_parent_d = new_parent->dentry;
395 	else
396 		new_parent_d = orphandir;
397 
398 	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
399 			new_parent_d, clk->name);
400 	if (d)
401 		clk->dentry = d;
402 	else
403 		pr_debug("%s: failed to rename debugfs entry for %s\n",
404 				__func__, clk->name);
405 }
406 
407 /**
408  * clk_debug_init - lazily create the debugfs clk tree visualization
409  *
410  * clks are often initialized very early during boot before memory can
411  * be dynamically allocated and well before debugfs is setup.
412  * clk_debug_init walks the clk tree hierarchy while holding
413  * prepare_lock and creates the topology as part of a late_initcall,
414  * thus ensuring that clks initialized very early will still be
415  * represented in the debugfs clk tree.  This function should only be
416  * called once at boot-time, and all other clks added dynamically will
417  * be done so with clk_debug_register.
418  */
419 static int __init clk_debug_init(void)
420 {
421 	struct clk *clk;
422 	struct dentry *d;
423 
424 	rootdir = debugfs_create_dir("clk", NULL);
425 
426 	if (!rootdir)
427 		return -ENOMEM;
428 
429 	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
430 				&clk_summary_fops);
431 	if (!d)
432 		return -ENOMEM;
433 
434 	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
435 				&clk_dump_fops);
436 	if (!d)
437 		return -ENOMEM;
438 
439 	orphandir = debugfs_create_dir("orphans", rootdir);
440 
441 	if (!orphandir)
442 		return -ENOMEM;
443 
444 	clk_prepare_lock();
445 
446 	hlist_for_each_entry(clk, &clk_root_list, child_node)
447 		clk_debug_create_subtree(clk, rootdir);
448 
449 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
450 		clk_debug_create_subtree(clk, orphandir);
451 
452 	inited = 1;
453 
454 	clk_prepare_unlock();
455 
456 	return 0;
457 }
458 late_initcall(clk_debug_init);
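
/*
 * Example (sketch): once the late_initcall above has run, the tree can be
 * inspected from userspace, assuming debugfs is mounted at its usual
 * location:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/clk/clk_summary
 *	# cat /sys/kernel/debug/clk/clk_dump
 */
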
459 #else
460 static inline int clk_debug_register(struct clk *clk) { return 0; }
461 static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
462 {
463 }
464 static inline void clk_debug_unregister(struct clk *clk)
465 {
466 }
467 #endif
468 
469 /* caller must hold prepare_lock */
470 static void clk_unprepare_unused_subtree(struct clk *clk)
471 {
472 	struct clk *child;
473 
474 	if (!clk)
475 		return;
476 
477 	hlist_for_each_entry(child, &clk->children, child_node)
478 		clk_unprepare_unused_subtree(child);
479 
480 	if (clk->prepare_count)
481 		return;
482 
483 	if (clk->flags & CLK_IGNORE_UNUSED)
484 		return;
485 
486 	if (__clk_is_prepared(clk)) {
487 		if (clk->ops->unprepare_unused)
488 			clk->ops->unprepare_unused(clk->hw);
489 		else if (clk->ops->unprepare)
490 			clk->ops->unprepare(clk->hw);
491 	}
492 }
493 
494 /* caller must hold prepare_lock */
495 static void clk_disable_unused_subtree(struct clk *clk)
496 {
497 	struct clk *child;
498 	unsigned long flags;
499 
500 	if (!clk)
501 		goto out;
502 
503 	hlist_for_each_entry(child, &clk->children, child_node)
504 		clk_disable_unused_subtree(child);
505 
506 	flags = clk_enable_lock();
507 
508 	if (clk->enable_count)
509 		goto unlock_out;
510 
511 	if (clk->flags & CLK_IGNORE_UNUSED)
512 		goto unlock_out;
513 
514 	/*
515 	 * some gate clocks have special needs during the disable-unused
516 	 * sequence.  call .disable_unused if available, otherwise fall
517 	 * back to .disable
518 	 */
519 	if (__clk_is_enabled(clk)) {
520 		if (clk->ops->disable_unused)
521 			clk->ops->disable_unused(clk->hw);
522 		else if (clk->ops->disable)
523 			clk->ops->disable(clk->hw);
524 	}
525 
526 unlock_out:
527 	clk_enable_unlock(flags);
528 
529 out:
530 	return;
531 }
532 
533 static bool clk_ignore_unused;
534 static int __init clk_ignore_unused_setup(char *__unused)
535 {
536 	clk_ignore_unused = true;
537 	return 1;
538 }
539 __setup("clk_ignore_unused", clk_ignore_unused_setup);
540 
541 static int clk_disable_unused(void)
542 {
543 	struct clk *clk;
544 
545 	if (clk_ignore_unused) {
546 		pr_warn("clk: Not disabling unused clocks\n");
547 		return 0;
548 	}
549 
550 	clk_prepare_lock();
551 
552 	hlist_for_each_entry(clk, &clk_root_list, child_node)
553 		clk_disable_unused_subtree(clk);
554 
555 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
556 		clk_disable_unused_subtree(clk);
557 
558 	hlist_for_each_entry(clk, &clk_root_list, child_node)
559 		clk_unprepare_unused_subtree(clk);
560 
561 	hlist_for_each_entry(clk, &clk_orphan_list, child_node)
562 		clk_unprepare_unused_subtree(clk);
563 
564 	clk_prepare_unlock();
565 
566 	return 0;
567 }
568 late_initcall_sync(clk_disable_unused);
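
/*
 * Example (sketch): the gating pass above can be skipped during platform
 * bring-up by booting with the "clk_ignore_unused" parameter on the kernel
 * command line, e.g. (console argument purely illustrative):
 *
 *	console=ttyS0,115200 clk_ignore_unused
 */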
569 
570 /***    helper functions   ***/
571 
572 const char *__clk_get_name(struct clk *clk)
573 {
574 	return !clk ? NULL : clk->name;
575 }
576 EXPORT_SYMBOL_GPL(__clk_get_name);
577 
578 struct clk_hw *__clk_get_hw(struct clk *clk)
579 {
580 	return !clk ? NULL : clk->hw;
581 }
582 EXPORT_SYMBOL_GPL(__clk_get_hw);
583 
584 u8 __clk_get_num_parents(struct clk *clk)
585 {
586 	return !clk ? 0 : clk->num_parents;
587 }
588 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
589 
590 struct clk *__clk_get_parent(struct clk *clk)
591 {
592 	return !clk ? NULL : clk->parent;
593 }
594 EXPORT_SYMBOL_GPL(__clk_get_parent);
595 
596 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
597 {
598 	if (!clk || index >= clk->num_parents)
599 		return NULL;
600 	else if (!clk->parents)
601 		return __clk_lookup(clk->parent_names[index]);
602 	else if (!clk->parents[index])
603 		return clk->parents[index] =
604 			__clk_lookup(clk->parent_names[index]);
605 	else
606 		return clk->parents[index];
607 }
608 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
609 
610 unsigned int __clk_get_enable_count(struct clk *clk)
611 {
612 	return !clk ? 0 : clk->enable_count;
613 }
614 
615 unsigned int __clk_get_prepare_count(struct clk *clk)
616 {
617 	return !clk ? 0 : clk->prepare_count;
618 }
619 
620 unsigned long __clk_get_rate(struct clk *clk)
621 {
622 	unsigned long ret;
623 
624 	if (!clk) {
625 		ret = 0;
626 		goto out;
627 	}
628 
629 	ret = clk->rate;
630 
631 	if (clk->flags & CLK_IS_ROOT)
632 		goto out;
633 
634 	if (!clk->parent)
635 		ret = 0;
636 
637 out:
638 	return ret;
639 }
640 EXPORT_SYMBOL_GPL(__clk_get_rate);
641 
642 unsigned long __clk_get_accuracy(struct clk *clk)
643 {
644 	if (!clk)
645 		return 0;
646 
647 	return clk->accuracy;
648 }
649 
650 unsigned long __clk_get_flags(struct clk *clk)
651 {
652 	return !clk ? 0 : clk->flags;
653 }
654 EXPORT_SYMBOL_GPL(__clk_get_flags);
655 
656 bool __clk_is_prepared(struct clk *clk)
657 {
658 	int ret;
659 
660 	if (!clk)
661 		return false;
662 
663 	/*
664 	 * .is_prepared is optional for clocks that can prepare
665 	 * fall back to software usage counter if it is missing
666 	 */
667 	if (!clk->ops->is_prepared) {
668 		ret = clk->prepare_count ? 1 : 0;
669 		goto out;
670 	}
671 
672 	ret = clk->ops->is_prepared(clk->hw);
673 out:
674 	return !!ret;
675 }
676 
677 bool __clk_is_enabled(struct clk *clk)
678 {
679 	int ret;
680 
681 	if (!clk)
682 		return false;
683 
684 	/*
685 	 * .is_enabled is only mandatory for clocks that gate
686 	 * fall back to software usage counter if .is_enabled is missing
687 	 */
688 	if (!clk->ops->is_enabled) {
689 		ret = clk->enable_count ? 1 : 0;
690 		goto out;
691 	}
692 
693 	ret = clk->ops->is_enabled(clk->hw);
694 out:
695 	return !!ret;
696 }
697 EXPORT_SYMBOL_GPL(__clk_is_enabled);
698 
699 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
700 {
701 	struct clk *child;
702 	struct clk *ret;
703 
704 	if (!strcmp(clk->name, name))
705 		return clk;
706 
707 	hlist_for_each_entry(child, &clk->children, child_node) {
708 		ret = __clk_lookup_subtree(name, child);
709 		if (ret)
710 			return ret;
711 	}
712 
713 	return NULL;
714 }
715 
716 struct clk *__clk_lookup(const char *name)
717 {
718 	struct clk *root_clk;
719 	struct clk *ret;
720 
721 	if (!name)
722 		return NULL;
723 
724 	/* search the 'proper' clk tree first */
725 	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
726 		ret = __clk_lookup_subtree(name, root_clk);
727 		if (ret)
728 			return ret;
729 	}
730 
731 	/* if not found, then search the orphan tree */
732 	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
733 		ret = __clk_lookup_subtree(name, root_clk);
734 		if (ret)
735 			return ret;
736 	}
737 
738 	return NULL;
739 }
740 
741 /*
742  * Helper for finding best parent to provide a given frequency. This can be used
743  * directly as a determine_rate callback (e.g. for a mux), or from a more
744  * complex clock that may combine a mux with other operations.
745  */
746 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
747 			      unsigned long *best_parent_rate,
748 			      struct clk **best_parent_p)
749 {
750 	struct clk *clk = hw->clk, *parent, *best_parent = NULL;
751 	int i, num_parents;
752 	unsigned long parent_rate, best = 0;
753 
754 	/* if NO_REPARENT flag set, pass through to current parent */
755 	if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
756 		parent = clk->parent;
757 		if (clk->flags & CLK_SET_RATE_PARENT)
758 			best = __clk_round_rate(parent, rate);
759 		else if (parent)
760 			best = __clk_get_rate(parent);
761 		else
762 			best = __clk_get_rate(clk);
763 		goto out;
764 	}
765 
766 	/* find the parent that can provide the fastest rate <= rate */
767 	num_parents = clk->num_parents;
768 	for (i = 0; i < num_parents; i++) {
769 		parent = clk_get_parent_by_index(clk, i);
770 		if (!parent)
771 			continue;
772 		if (clk->flags & CLK_SET_RATE_PARENT)
773 			parent_rate = __clk_round_rate(parent, rate);
774 		else
775 			parent_rate = __clk_get_rate(parent);
776 		if (parent_rate <= rate && parent_rate > best) {
777 			best_parent = parent;
778 			best = parent_rate;
779 		}
780 	}
781 
782 out:
783 	if (best_parent)
784 		*best_parent_p = best_parent;
785 	*best_parent_rate = best;
786 
787 	return best;
788 }
789 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
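
/*
 * Example sketch (hypothetical driver code): a mux-like provider can plug
 * the helper above straight into its ops as the .determine_rate callback:
 *
 *	static const struct clk_ops foo_mux_ops = {
 *		.get_parent	= foo_mux_get_parent,
 *		.set_parent	= foo_mux_set_parent,
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * foo_mux_get_parent()/foo_mux_set_parent() are illustrative names only.
 */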
790 
791 /***        clk api        ***/
792 
793 void __clk_unprepare(struct clk *clk)
794 {
795 	if (!clk)
796 		return;
797 
798 	if (WARN_ON(clk->prepare_count == 0))
799 		return;
800 
801 	if (--clk->prepare_count > 0)
802 		return;
803 
804 	WARN_ON(clk->enable_count > 0);
805 
806 	if (clk->ops->unprepare)
807 		clk->ops->unprepare(clk->hw);
808 
809 	__clk_unprepare(clk->parent);
810 }
811 
812 /**
813  * clk_unprepare - undo preparation of a clock source
814  * @clk: the clk being unprepared
815  *
816  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
817  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
818  * if the operation may sleep.  One example is a clk which is accessed over
819  * I2c.  In the complex case a clk gate operation may require a fast and a slow
820  * part.  It is this reason that clk_unprepare and clk_disable are not mutually
821  * exclusive.  In fact clk_disable must be called before clk_unprepare.
822  */
823 void clk_unprepare(struct clk *clk)
824 {
825 	clk_prepare_lock();
826 	__clk_unprepare(clk);
827 	clk_prepare_unlock();
828 }
829 EXPORT_SYMBOL_GPL(clk_unprepare);
830 
831 int __clk_prepare(struct clk *clk)
832 {
833 	int ret = 0;
834 
835 	if (!clk)
836 		return 0;
837 
838 	if (clk->prepare_count == 0) {
839 		ret = __clk_prepare(clk->parent);
840 		if (ret)
841 			return ret;
842 
843 		if (clk->ops->prepare) {
844 			ret = clk->ops->prepare(clk->hw);
845 			if (ret) {
846 				__clk_unprepare(clk->parent);
847 				return ret;
848 			}
849 		}
850 	}
851 
852 	clk->prepare_count++;
853 
854 	return 0;
855 }
856 
857 /**
858  * clk_prepare - prepare a clock source
859  * @clk: the clk being prepared
860  *
861  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
862  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
863  * operation may sleep.  One example is a clk which is accessed over I2C.  In
864  * the complex case a clk ungate operation may require a fast and a slow part.
865  * It is for this reason that clk_prepare and clk_enable are not mutually
866  * exclusive.  In fact clk_prepare must be called before clk_enable.
867  * Returns 0 on success, a negative error code otherwise.
868  */
869 int clk_prepare(struct clk *clk)
870 {
871 	int ret;
872 
873 	clk_prepare_lock();
874 	ret = __clk_prepare(clk);
875 	clk_prepare_unlock();
876 
877 	return ret;
878 }
879 EXPORT_SYMBOL_GPL(clk_prepare);
880 
881 static void __clk_disable(struct clk *clk)
882 {
883 	if (!clk)
884 		return;
885 
886 	if (WARN_ON(IS_ERR(clk)))
887 		return;
888 
889 	if (WARN_ON(clk->enable_count == 0))
890 		return;
891 
892 	if (--clk->enable_count > 0)
893 		return;
894 
895 	if (clk->ops->disable)
896 		clk->ops->disable(clk->hw);
897 
898 	__clk_disable(clk->parent);
899 }
900 
901 /**
902  * clk_disable - gate a clock
903  * @clk: the clk being gated
904  *
905  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
906  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
907  * clk if the operation is fast and will never sleep.  One example is a
908  * SoC-internal clk which is controlled via simple register writes.  In the
909  * complex case a clk gate operation may require a fast and a slow part.  It is
910  * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
911  * In fact clk_disable must be called before clk_unprepare.
912  */
913 void clk_disable(struct clk *clk)
914 {
915 	unsigned long flags;
916 
917 	flags = clk_enable_lock();
918 	__clk_disable(clk);
919 	clk_enable_unlock(flags);
920 }
921 EXPORT_SYMBOL_GPL(clk_disable);
922 
923 static int __clk_enable(struct clk *clk)
924 {
925 	int ret = 0;
926 
927 	if (!clk)
928 		return 0;
929 
930 	if (WARN_ON(clk->prepare_count == 0))
931 		return -ESHUTDOWN;
932 
933 	if (clk->enable_count == 0) {
934 		ret = __clk_enable(clk->parent);
935 
936 		if (ret)
937 			return ret;
938 
939 		if (clk->ops->enable) {
940 			ret = clk->ops->enable(clk->hw);
941 			if (ret) {
942 				__clk_disable(clk->parent);
943 				return ret;
944 			}
945 		}
946 	}
947 
948 	clk->enable_count++;
949 	return 0;
950 }
951 
952 /**
953  * clk_enable - ungate a clock
954  * @clk: the clk being ungated
955  *
956  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
957  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
958  * if the operation will never sleep.  One example is a SoC-internal clk which
959  * is controlled via simple register writes.  In the complex case a clk ungate
960  * operation may require a fast and a slow part.  It is for this reason that
961  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
962  * must be called before clk_enable.  Returns 0 on success, a negative error
963  * code otherwise.
964  */
965 int clk_enable(struct clk *clk)
966 {
967 	unsigned long flags;
968 	int ret;
969 
970 	flags = clk_enable_lock();
971 	ret = __clk_enable(clk);
972 	clk_enable_unlock(flags);
973 
974 	return ret;
975 }
976 EXPORT_SYMBOL_GPL(clk_enable);
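
/*
 * Example (sketch of typical consumer usage): the sleepable and atomic
 * halves described above are normally paired; linux/clk.h also provides
 * clk_prepare_enable()/clk_disable_unprepare() wrappers for exactly this
 * sequence:
 *
 *	ret = clk_prepare(clk);		(may sleep)
 *	if (!ret)
 *		ret = clk_enable(clk);	(must not sleep)
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */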
977 
978 /**
979  * __clk_round_rate - round the given rate for a clk
980  * @clk: round the rate of this clock
981  * @rate: the rate which is to be rounded
982  *
983  * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
984  */
985 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
986 {
987 	unsigned long parent_rate = 0;
988 	struct clk *parent;
989 
990 	if (!clk)
991 		return 0;
992 
993 	parent = clk->parent;
994 	if (parent)
995 		parent_rate = parent->rate;
996 
997 	if (clk->ops->determine_rate)
998 		return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
999 						&parent);
1000 	else if (clk->ops->round_rate)
1001 		return clk->ops->round_rate(clk->hw, rate, &parent_rate);
1002 	else if (clk->flags & CLK_SET_RATE_PARENT)
1003 		return __clk_round_rate(clk->parent, rate);
1004 	else
1005 		return clk->rate;
1006 }
1007 
1008 /**
1009  * clk_round_rate - round the given rate for a clk
1010  * @clk: the clk for which we are rounding a rate
1011  * @rate: the rate which is to be rounded
1012  *
1013  * Takes in a rate as input and rounds it to a rate that the clk can actually
1014  * use, which is then returned.  If clk doesn't support the round_rate operation
1015  * then the parent rate is returned.
1016  */
1017 long clk_round_rate(struct clk *clk, unsigned long rate)
1018 {
1019 	unsigned long ret;
1020 
1021 	clk_prepare_lock();
1022 	ret = __clk_round_rate(clk, rate);
1023 	clk_prepare_unlock();
1024 
1025 	return ret;
1026 }
1027 EXPORT_SYMBOL_GPL(clk_round_rate);
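
/*
 * Example (sketch of typical consumer usage): probe what the clock can
 * actually deliver before committing to it; the 48 MHz request is purely
 * illustrative:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 */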
1028 
1029 /**
1030  * __clk_notify - call clk notifier chain
1031  * @clk: struct clk * that is changing rate
1032  * @msg: clk notifier type (see include/linux/clk.h)
1033  * @old_rate: old clk rate
1034  * @new_rate: new clk rate
1035  *
1036  * Triggers a notifier call chain on the clk rate-change notification
1037  * for 'clk'.  Passes a pointer to the struct clk and the previous
1038  * and current rates to the notifier callback.  Intended to be called by
1039  * internal clock code only.  Returns NOTIFY_DONE from the last driver
1040  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1041  * a driver returns that.
1042  */
1043 static int __clk_notify(struct clk *clk, unsigned long msg,
1044 		unsigned long old_rate, unsigned long new_rate)
1045 {
1046 	struct clk_notifier *cn;
1047 	struct clk_notifier_data cnd;
1048 	int ret = NOTIFY_DONE;
1049 
1050 	cnd.clk = clk;
1051 	cnd.old_rate = old_rate;
1052 	cnd.new_rate = new_rate;
1053 
1054 	list_for_each_entry(cn, &clk_notifier_list, node) {
1055 		if (cn->clk == clk) {
1056 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1057 					&cnd);
1058 			break;
1059 		}
1060 	}
1061 
1062 	return ret;
1063 }
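
/*
 * Example sketch (hypothetical consumer code): the receiving end of the
 * chain above, registered with clk_notifier_register(), looks roughly like
 * this; FOO_MAX_RATE is an illustrative limit:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE && cnd->new_rate > FOO_MAX_RATE)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 */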
1064 
1065 /**
1066  * __clk_recalc_accuracies
1067  * @clk: first clk in the subtree
1068  *
1069  * Walks the subtree of clks starting with clk and recalculates accuracies as
1070  * it goes.  Note that if a clk does not implement the .recalc_accuracy
1071  * callback then it is assumed that the clock will take on the accuracy of its
1072  * parent.
1073  *
1074  * Caller must hold prepare_lock.
1075  */
1076 static void __clk_recalc_accuracies(struct clk *clk)
1077 {
1078 	unsigned long parent_accuracy = 0;
1079 	struct clk *child;
1080 
1081 	if (clk->parent)
1082 		parent_accuracy = clk->parent->accuracy;
1083 
1084 	if (clk->ops->recalc_accuracy)
1085 		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1086 							  parent_accuracy);
1087 	else
1088 		clk->accuracy = parent_accuracy;
1089 
1090 	hlist_for_each_entry(child, &clk->children, child_node)
1091 		__clk_recalc_accuracies(child);
1092 }
1093 
1094 /**
1095  * clk_get_accuracy - return the accuracy of clk
1096  * @clk: the clk whose accuracy is being returned
1097  *
1098  * Simply returns the cached accuracy of the clk, unless
1099  * CLK_GET_ACCURACY_NOCACHE flag is set, which means an accuracy
1100  * recalculation will be issued.
1101  * If clk is NULL then returns 0.
1102  */
1103 long clk_get_accuracy(struct clk *clk)
1104 {
1105 	unsigned long accuracy;
1106 
1107 	clk_prepare_lock();
1108 	if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
1109 		__clk_recalc_accuracies(clk);
1110 
1111 	accuracy = __clk_get_accuracy(clk);
1112 	clk_prepare_unlock();
1113 
1114 	return accuracy;
1115 }
1116 EXPORT_SYMBOL_GPL(clk_get_accuracy);
1117 
1118 /**
1119  * __clk_recalc_rates
1120  * @clk: first clk in the subtree
1121  * @msg: notification type (see include/linux/clk.h)
1122  *
1123  * Walks the subtree of clks starting with clk and recalculates rates as it
1124  * goes.  Note that if a clk does not implement the .recalc_rate callback then
1125  * it is assumed that the clock will take on the rate of its parent.
1126  *
1127  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1128  * if necessary.
1129  *
1130  * Caller must hold prepare_lock.
1131  */
1132 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
1133 {
1134 	unsigned long old_rate;
1135 	unsigned long parent_rate = 0;
1136 	struct clk *child;
1137 
1138 	old_rate = clk->rate;
1139 
1140 	if (clk->parent)
1141 		parent_rate = clk->parent->rate;
1142 
1143 	if (clk->ops->recalc_rate)
1144 		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
1145 	else
1146 		clk->rate = parent_rate;
1147 
1148 	/*
1149 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1150 	 * & ABORT_RATE_CHANGE notifiers
1151 	 */
1152 	if (clk->notifier_count && msg)
1153 		__clk_notify(clk, msg, old_rate, clk->rate);
1154 
1155 	hlist_for_each_entry(child, &clk->children, child_node)
1156 		__clk_recalc_rates(child, msg);
1157 }
1158 
1159 /**
1160  * clk_get_rate - return the rate of clk
1161  * @clk: the clk whose rate is being returned
1162  *
1163  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1164  * is set, which means a recalc_rate will be issued.
1165  * If clk is NULL then returns 0.
1166  */
1167 unsigned long clk_get_rate(struct clk *clk)
1168 {
1169 	unsigned long rate;
1170 
1171 	clk_prepare_lock();
1172 
1173 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
1174 		__clk_recalc_rates(clk, 0);
1175 
1176 	rate = __clk_get_rate(clk);
1177 	clk_prepare_unlock();
1178 
1179 	return rate;
1180 }
1181 EXPORT_SYMBOL_GPL(clk_get_rate);
1182 
1183 static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
1184 {
1185 	int i;
1186 
1187 	if (!clk->parents) {
1188 		clk->parents = kcalloc(clk->num_parents,
1189 					sizeof(struct clk *), GFP_KERNEL);
1190 		if (!clk->parents)
1191 			return -ENOMEM;
1192 	}
1193 
1194 	/*
1195 	 * find index of new parent clock using cached parent ptrs,
1196 	 * or if not yet cached, use string name comparison and cache
1197 	 * them now to avoid future calls to __clk_lookup.
1198 	 */
1199 	for (i = 0; i < clk->num_parents; i++) {
1200 		if (clk->parents[i] == parent)
1201 			return i;
1202 
1203 		if (clk->parents[i])
1204 			continue;
1205 
1206 		if (!strcmp(clk->parent_names[i], parent->name)) {
1207 			clk->parents[i] = __clk_lookup(parent->name);
1208 			return i;
1209 		}
1210 	}
1211 
1212 	return -EINVAL;
1213 }
1214 
1215 static void clk_reparent(struct clk *clk, struct clk *new_parent)
1216 {
1217 	hlist_del(&clk->child_node);
1218 
1219 	if (new_parent) {
1220 		/* avoid duplicate POST_RATE_CHANGE notifications */
1221 		if (new_parent->new_child == clk)
1222 			new_parent->new_child = NULL;
1223 
1224 		hlist_add_head(&clk->child_node, &new_parent->children);
1225 	} else {
1226 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1227 	}
1228 
1229 	clk->parent = new_parent;
1230 }
1231 
1232 static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
1233 {
1234 	unsigned long flags;
1235 	struct clk *old_parent = clk->parent;
1236 
1237 	/*
1238 	 * Migrate prepare state between parents and prevent race with
1239 	 * clk_enable().
1240 	 *
1241 	 * If the clock is not prepared, then a race with
1242 	 * clk_enable/disable() is impossible since we already have the
1243 	 * prepare lock (future calls to clk_enable() need to be preceded by
1244 	 * a clk_prepare()).
1245 	 *
1246 	 * If the clock is prepared, migrate the prepared state to the new
1247 	 * parent and also protect against a race with clk_enable() by
1248 	 * forcing the clock and the new parent on.  This ensures that all
1249 	 * future calls to clk_enable() are practically NOPs with respect to
1250 	 * hardware and software states.
1251 	 *
1252 	 * See also: Comment for clk_set_parent() below.
1253 	 */
1254 	if (clk->prepare_count) {
1255 		__clk_prepare(parent);
1256 		clk_enable(parent);
1257 		clk_enable(clk);
1258 	}
1259 
1260 	/* update the clk tree topology */
1261 	flags = clk_enable_lock();
1262 	clk_reparent(clk, parent);
1263 	clk_enable_unlock(flags);
1264 
1265 	return old_parent;
1266 }
1267 
1268 static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
1269 		struct clk *old_parent)
1270 {
1271 	/*
1272 	 * Finish the migration of prepare state and undo the changes done
1273 	 * for preventing a race with clk_enable().
1274 	 */
1275 	if (clk->prepare_count) {
1276 		clk_disable(clk);
1277 		clk_disable(old_parent);
1278 		__clk_unprepare(old_parent);
1279 	}
1280 
1281 	/* update debugfs with new clk tree topology */
1282 	clk_debug_reparent(clk, parent);
1283 }
1284 
1285 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
1286 {
1287 	unsigned long flags;
1288 	int ret = 0;
1289 	struct clk *old_parent;
1290 
1291 	old_parent = __clk_set_parent_before(clk, parent);
1292 
1293 	/* change clock input source */
1294 	if (parent && clk->ops->set_parent)
1295 		ret = clk->ops->set_parent(clk->hw, p_index);
1296 
1297 	if (ret) {
1298 		flags = clk_enable_lock();
1299 		clk_reparent(clk, old_parent);
1300 		clk_enable_unlock(flags);
1301 
1302 		if (clk->prepare_count) {
1303 			clk_disable(clk);
1304 			clk_disable(parent);
1305 			__clk_unprepare(parent);
1306 		}
1307 		return ret;
1308 	}
1309 
1310 	__clk_set_parent_after(clk, parent, old_parent);
1311 
1312 	return 0;
1313 }
1314 
1315 /**
1316  * __clk_speculate_rates
1317  * @clk: first clk in the subtree
1318  * @parent_rate: the "future" rate of clk's parent
1319  *
1320  * Walks the subtree of clks starting with clk, speculating rates as it
1321  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1322  *
1323  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1324  * pre-rate change notifications and returns early if no clks in the
1325  * subtree have subscribed to the notifications.  Note that if a clk does not
1326  * implement the .recalc_rate callback then it is assumed that the clock will
1327  * take on the rate of its parent.
1328  *
1329  * Caller must hold prepare_lock.
1330  */
1331 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
1332 {
1333 	struct clk *child;
1334 	unsigned long new_rate;
1335 	int ret = NOTIFY_DONE;
1336 
1337 	if (clk->ops->recalc_rate)
1338 		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
1339 	else
1340 		new_rate = parent_rate;
1341 
1342 	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1343 	if (clk->notifier_count)
1344 		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
1345 
1346 	if (ret & NOTIFY_STOP_MASK) {
1347 		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1348 				__func__, clk->name, ret);
1349 		goto out;
1350 	}
1351 
1352 	hlist_for_each_entry(child, &clk->children, child_node) {
1353 		ret = __clk_speculate_rates(child, new_rate);
1354 		if (ret & NOTIFY_STOP_MASK)
1355 			break;
1356 	}
1357 
1358 out:
1359 	return ret;
1360 }
1361 
1362 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
1363 			     struct clk *new_parent, u8 p_index)
1364 {
1365 	struct clk *child;
1366 
1367 	clk->new_rate = new_rate;
1368 	clk->new_parent = new_parent;
1369 	clk->new_parent_index = p_index;
1370 	/* include clk in new parent's PRE_RATE_CHANGE notifications */
1371 	clk->new_child = NULL;
1372 	if (new_parent && new_parent != clk->parent)
1373 		new_parent->new_child = clk;
1374 
1375 	hlist_for_each_entry(child, &clk->children, child_node) {
1376 		if (child->ops->recalc_rate)
1377 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
1378 		else
1379 			child->new_rate = new_rate;
1380 		clk_calc_subtree(child, child->new_rate, NULL, 0);
1381 	}
1382 }
1383 
1384 /*
1385  * calculate the new rates returning the topmost clock that has to be
1386  * changed.
1387  */
1388 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
1389 {
1390 	struct clk *top = clk;
1391 	struct clk *old_parent, *parent;
1392 	unsigned long best_parent_rate = 0;
1393 	unsigned long new_rate;
1394 	int p_index = 0;
1395 
1396 	/* sanity */
1397 	if (IS_ERR_OR_NULL(clk))
1398 		return NULL;
1399 
1400 	/* save parent rate, if it exists */
1401 	parent = old_parent = clk->parent;
1402 	if (parent)
1403 		best_parent_rate = parent->rate;
1404 
1405 	/* find the closest rate and parent clk/rate */
1406 	if (clk->ops->determine_rate) {
1407 		new_rate = clk->ops->determine_rate(clk->hw, rate,
1408 						    &best_parent_rate,
1409 						    &parent);
1410 	} else if (clk->ops->round_rate) {
1411 		new_rate = clk->ops->round_rate(clk->hw, rate,
1412 						&best_parent_rate);
1413 	} else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
1414 		/* pass-through clock without adjustable parent */
1415 		clk->new_rate = clk->rate;
1416 		return NULL;
1417 	} else {
1418 		/* pass-through clock with adjustable parent */
1419 		top = clk_calc_new_rates(parent, rate);
1420 		new_rate = parent->new_rate;
1421 		goto out;
1422 	}
1423 
1424 	/* some clocks must be gated to change parent */
1425 	if (parent != old_parent &&
1426 	    (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1427 		pr_debug("%s: %s not gated but wants to reparent\n",
1428 			 __func__, clk->name);
1429 		return NULL;
1430 	}
1431 
1432 	/* try finding the new parent index */
1433 	if (parent) {
1434 		p_index = clk_fetch_parent_index(clk, parent);
1435 		if (p_index < 0) {
1436 			pr_debug("%s: clk %s can not be parent of clk %s\n",
1437 				 __func__, parent->name, clk->name);
1438 			return NULL;
1439 		}
1440 	}
1441 
1442 	if ((clk->flags & CLK_SET_RATE_PARENT) && parent &&
1443 	    best_parent_rate != parent->rate)
1444 		top = clk_calc_new_rates(parent, best_parent_rate);
1445 
1446 out:
1447 	clk_calc_subtree(clk, new_rate, parent, p_index);
1448 
1449 	return top;
1450 }
1451 
1452 /*
1453  * Notify about rate changes in a subtree. Always walk down the whole tree
1454  * so that in case of an error we can walk down the whole tree again and
1455  * abort the change.
1456  */
1457 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
1458 {
1459 	struct clk *child, *tmp_clk, *fail_clk = NULL;
1460 	int ret = NOTIFY_DONE;
1461 
1462 	if (clk->rate == clk->new_rate)
1463 		return NULL;
1464 
1465 	if (clk->notifier_count) {
1466 		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
1467 		if (ret & NOTIFY_STOP_MASK)
1468 			fail_clk = clk;
1469 	}
1470 
1471 	hlist_for_each_entry(child, &clk->children, child_node) {
1472 		/* Skip children who will be reparented to another clock */
1473 		if (child->new_parent && child->new_parent != clk)
1474 			continue;
1475 		tmp_clk = clk_propagate_rate_change(child, event);
1476 		if (tmp_clk)
1477 			fail_clk = tmp_clk;
1478 	}
1479 
1480 	/* handle the new child who might not be in clk->children yet */
1481 	if (clk->new_child) {
1482 		tmp_clk = clk_propagate_rate_change(clk->new_child, event);
1483 		if (tmp_clk)
1484 			fail_clk = tmp_clk;
1485 	}
1486 
1487 	return fail_clk;
1488 }
1489 
1490 /*
1491  * walk down a subtree and set the new rates notifying the rate
1492  * change on the way
1493  */
1494 static void clk_change_rate(struct clk *clk)
1495 {
1496 	struct clk *child;
1497 	unsigned long old_rate;
1498 	unsigned long best_parent_rate = 0;
1499 	bool skip_set_rate = false;
1500 	struct clk *old_parent;
1501 
1502 	old_rate = clk->rate;
1503 
1504 	if (clk->new_parent)
1505 		best_parent_rate = clk->new_parent->rate;
1506 	else if (clk->parent)
1507 		best_parent_rate = clk->parent->rate;
1508 
1509 	if (clk->new_parent && clk->new_parent != clk->parent) {
1510 		old_parent = __clk_set_parent_before(clk, clk->new_parent);
1511 
1512 		if (clk->ops->set_rate_and_parent) {
1513 			skip_set_rate = true;
1514 			clk->ops->set_rate_and_parent(clk->hw, clk->new_rate,
1515 					best_parent_rate,
1516 					clk->new_parent_index);
1517 		} else if (clk->ops->set_parent) {
1518 			clk->ops->set_parent(clk->hw, clk->new_parent_index);
1519 		}
1520 
1521 		__clk_set_parent_after(clk, clk->new_parent, old_parent);
1522 	}
1523 
1524 	if (!skip_set_rate && clk->ops->set_rate)
1525 		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1526 
1527 	if (clk->ops->recalc_rate)
1528 		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
1529 	else
1530 		clk->rate = best_parent_rate;
1531 
1532 	if (clk->notifier_count && old_rate != clk->rate)
1533 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1534 
1535 	hlist_for_each_entry(child, &clk->children, child_node) {
1536 		/* Skip children who will be reparented to another clock */
1537 		if (child->new_parent && child->new_parent != clk)
1538 			continue;
1539 		clk_change_rate(child);
1540 	}
1541 
1542 	/* handle the new child who might not be in clk->children yet */
1543 	if (clk->new_child)
1544 		clk_change_rate(clk->new_child);
1545 }
1546 
1547 /**
1548  * clk_set_rate - specify a new rate for clk
1549  * @clk: the clk whose rate is being changed
1550  * @rate: the new rate for clk
1551  *
1552  * In the simplest case clk_set_rate will only adjust the rate of clk.
1553  *
1554  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1555  * propagate up to clk's parent; whether or not this happens depends on the
1556  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
1557  * after calling .round_rate then upstream parent propagation is ignored.  If
1558  * *parent_rate comes back with a new rate for clk's parent then we propagate
1559  * up to clk's parent and set its rate.  Upward propagation will continue
1560  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1561  * .round_rate stops requesting changes to clk's parent_rate.
1562  *
1563  * Rate changes are accomplished via tree traversal that also recalculates the
1564  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1565  *
1566  * Returns 0 on success, a negative error code otherwise.
1567  */
1568 int clk_set_rate(struct clk *clk, unsigned long rate)
1569 {
1570 	struct clk *top, *fail_clk;
1571 	int ret = 0;
1572 
1573 	if (!clk)
1574 		return 0;
1575 
1576 	/* prevent racing with updates to the clock topology */
1577 	clk_prepare_lock();
1578 
1579 	/* bail early if nothing to do */
1580 	if (rate == clk_get_rate(clk))
1581 		goto out;
1582 
1583 	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1584 		ret = -EBUSY;
1585 		goto out;
1586 	}
1587 
1588 	/* calculate new rates and get the topmost changed clock */
1589 	top = clk_calc_new_rates(clk, rate);
1590 	if (!top) {
1591 		ret = -EINVAL;
1592 		goto out;
1593 	}
1594 
1595 	/* notify that we are about to change rates */
1596 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1597 	if (fail_clk) {
1598 		pr_debug("%s: failed to set %s rate\n", __func__,
1599 				fail_clk->name);
1600 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1601 		ret = -EBUSY;
1602 		goto out;
1603 	}
1604 
1605 	/* change the rates */
1606 	clk_change_rate(top);
1607 
1608 out:
1609 	clk_prepare_unlock();
1610 
1611 	return ret;
1612 }
1613 EXPORT_SYMBOL_GPL(clk_set_rate);
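
/*
 * Example sketch (hypothetical provider init data): whether a request may
 * propagate upward as described above is opted into per clock at
 * registration time via CLK_SET_RATE_PARENT:
 *
 *	struct clk_init_data init = {
 *		.name		= "foo_div",
 *		.ops		= &foo_div_ops,
 *		.parent_names	= foo_div_parents,
 *		.num_parents	= ARRAY_SIZE(foo_div_parents),
 *		.flags		= CLK_SET_RATE_PARENT,
 *	};
 */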
1614 
1615 /**
1616  * clk_get_parent - return the parent of a clk
1617  * @clk: the clk whose parent gets returned
1618  *
1619  * Simply returns clk->parent.  Returns NULL if clk is NULL.
1620  */
1621 struct clk *clk_get_parent(struct clk *clk)
1622 {
1623 	struct clk *parent;
1624 
1625 	clk_prepare_lock();
1626 	parent = __clk_get_parent(clk);
1627 	clk_prepare_unlock();
1628 
1629 	return parent;
1630 }
1631 EXPORT_SYMBOL_GPL(clk_get_parent);
1632 
1633 /*
1634  * .get_parent is mandatory for clocks with multiple possible parents.  It is
1635  * optional for single-parent clocks.  Always call .get_parent if it is
1636  * available and WARN if it is missing for multi-parent clocks.
1637  *
1638  * For single-parent clocks without .get_parent, first check to see if the
1639  * .parents array exists, and if so use it to avoid an expensive tree
1640  * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
1641  */
1642 static struct clk *__clk_init_parent(struct clk *clk)
1643 {
1644 	struct clk *ret = NULL;
1645 	u8 index;
1646 
1647 	/* handle the trivial cases */
1648 
1649 	if (!clk->num_parents)
1650 		goto out;
1651 
1652 	if (clk->num_parents == 1) {
1653 		if (IS_ERR_OR_NULL(clk->parent))
1654 			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
1655 		ret = clk->parent;
1656 		goto out;
1657 	}
1658 
1659 	if (!clk->ops->get_parent) {
1660 		WARN(!clk->ops->get_parent,
1661 			"%s: multi-parent clocks must implement .get_parent\n",
1662 			__func__);
1663 		goto out;
1664 	}
1665 
1666 	/*
1667 	 * Do our best to cache parent clocks in clk->parents.  This prevents
1668 	 * unnecessary and expensive calls to __clk_lookup.  We don't set
1669 	 * clk->parent here; that is done by the calling function
1670 	 */
1671 
1672 	index = clk->ops->get_parent(clk->hw);
1673 
1674 	if (!clk->parents)
1675 		clk->parents =
1676 			kcalloc(clk->num_parents, sizeof(struct clk *),
1677 					GFP_KERNEL);
1678 
1679 	ret = clk_get_parent_by_index(clk, index);
1680 
1681 out:
1682 	return ret;
1683 }
1684 
1685 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1686 {
1687 	clk_reparent(clk, new_parent);
1688 	clk_debug_reparent(clk, new_parent);
1689 	__clk_recalc_accuracies(clk);
1690 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
1691 }
1692 
1693 /**
1694  * clk_set_parent - switch the parent of a mux clk
1695  * @clk: the mux clk whose input we are switching
1696  * @parent: the new input to clk
1697  *
1698  * Re-parent clk to use parent as its new input source.  If clk is in
1699  * prepared state, the clk will get enabled for the duration of this call. If
1700  * that's not acceptable for a specific clk (e.g. the consumer can't handle
1701  * that, the reparenting is glitchy in hardware, etc), use the
1702  * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1703  *
1704  * After successfully changing clk's parent clk_set_parent will update the
1705  * clk topology, debugfs topology and propagate rate recalculation via
1706  * __clk_recalc_rates.
1707  *
1708  * Returns 0 on success, a negative error code otherwise.
1709  */
1710 int clk_set_parent(struct clk *clk, struct clk *parent)
1711 {
1712 	int ret = 0;
1713 	int p_index = 0;
1714 	unsigned long p_rate = 0;
1715 
1716 	if (!clk)
1717 		return 0;
1718 
1719 	if (!clk->ops)
1720 		return -EINVAL;
1721 
1722 	/* verify ops for multi-parent clks */
1723 	if ((clk->num_parents > 1) && (!clk->ops->set_parent))
1724 		return -ENOSYS;
1725 
1726 	/* prevent racing with updates to the clock topology */
1727 	clk_prepare_lock();
1728 
1729 	if (clk->parent == parent)
1730 		goto out;
1731 
1732 	/* check that we are allowed to re-parent if the clock is in use */
1733 	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) {
1734 		ret = -EBUSY;
1735 		goto out;
1736 	}
1737 
1738 	/* try finding the new parent index */
1739 	if (parent) {
1740 		p_index = clk_fetch_parent_index(clk, parent);
1741 		p_rate = parent->rate;
1742 		if (p_index < 0) {
1743 			pr_debug("%s: clk %s can not be parent of clk %s\n",
1744 					__func__, parent->name, clk->name);
1745 			ret = p_index;
1746 			goto out;
1747 		}
1748 	}
1749 
1750 	/* propagate PRE_RATE_CHANGE notifications */
1751 	ret = __clk_speculate_rates(clk, p_rate);
1752 
1753 	/* abort if a driver objects */
1754 	if (ret & NOTIFY_STOP_MASK)
1755 		goto out;
1756 
1757 	/* do the re-parent */
1758 	ret = __clk_set_parent(clk, parent, p_index);
1759 
1760 	/* propagate rate and accuracy recalculation accordingly */
1761 	if (ret) {
1762 		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1763 	} else {
1764 		__clk_recalc_rates(clk, POST_RATE_CHANGE);
1765 		__clk_recalc_accuracies(clk);
1766 	}
1767 
1768 out:
1769 	clk_prepare_unlock();
1770 
1771 	return ret;
1772 }
1773 EXPORT_SYMBOL_GPL(clk_set_parent);
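
/*
 * Example (sketch of typical consumer usage, names are illustrative):
 * steering a mux to a slower source, e.g. before entering a low-power
 * state:
 *
 *	struct clk *slow = clk_get(dev, "osc32k");
 *
 *	if (!IS_ERR(slow))
 *		ret = clk_set_parent(mux_clk, slow);
 */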
1774 
1775 /**
1776  * __clk_init - initialize the data structures in a struct clk
1777  * @dev:	device initializing this clk, placeholder for now
1778  * @clk:	clk being initialized
1779  *
1780  * Initializes the lists in struct clk, queries the hardware for the
1781  * parent and rate and sets them both.
1782  */
1783 int __clk_init(struct device *dev, struct clk *clk)
1784 {
1785 	int i, ret = 0;
1786 	struct clk *orphan;
1787 	struct hlist_node *tmp2;
1788 
1789 	if (!clk)
1790 		return -EINVAL;
1791 
1792 	clk_prepare_lock();
1793 
1794 	/* check to see if a clock with this name is already registered */
1795 	if (__clk_lookup(clk->name)) {
1796 		pr_debug("%s: clk %s already initialized\n",
1797 				__func__, clk->name);
1798 		ret = -EEXIST;
1799 		goto out;
1800 	}
1801 
1802 	/* check that clk_ops are sane.  See Documentation/clk.txt */
1803 	if (clk->ops->set_rate &&
1804 	    !((clk->ops->round_rate || clk->ops->determine_rate) &&
1805 	      clk->ops->recalc_rate)) {
1806 		pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
1807 				__func__, clk->name);
1808 		ret = -EINVAL;
1809 		goto out;
1810 	}
1811 
1812 	if (clk->ops->set_parent && !clk->ops->get_parent) {
1813 		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1814 				__func__, clk->name);
1815 		ret = -EINVAL;
1816 		goto out;
1817 	}
1818 
1819 	if (clk->ops->set_rate_and_parent &&
1820 			!(clk->ops->set_parent && clk->ops->set_rate)) {
1821 		pr_warn("%s: %s must implement .set_parent & .set_rate\n",
1822 				__func__, clk->name);
1823 		ret = -EINVAL;
1824 		goto out;
1825 	}
1826 
1827 	/* throw a WARN if any entries in parent_names are NULL */
1828 	for (i = 0; i < clk->num_parents; i++)
1829 		WARN(!clk->parent_names[i],
1830 				"%s: invalid NULL in %s's .parent_names\n",
1831 				__func__, clk->name);
1832 
1833 	/*
1834 	 * Allocate an array of struct clk *'s to avoid unnecessary string
1835 	 * look-ups of clk's possible parents.  This can fail for clocks passed
1836 	 * in to clk_init during early boot; thus any access to clk->parents[]
1837 	 * must always check for a NULL pointer and try to populate it if
1838 	 * necessary.
1839 	 *
1840 	 * If clk->parents is not NULL we skip this entire block.  This allows
1841 	 * for clock drivers to statically initialize clk->parents.
1842 	 */
1843 	if (clk->num_parents > 1 && !clk->parents) {
1844 		clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
1845 					GFP_KERNEL);
1846 		/*
1847 		 * __clk_lookup returns NULL for parents that have not been
1848 		 * clk_init'd; thus any access to clk->parents[] must check
1849 		 * for a NULL pointer.  We can always perform lazy lookups for
1850 		 * missing parents later on.
1851 		 */
1852 		if (clk->parents)
1853 			for (i = 0; i < clk->num_parents; i++)
1854 				clk->parents[i] =
1855 					__clk_lookup(clk->parent_names[i]);
1856 	}
1857 
1858 	clk->parent = __clk_init_parent(clk);
1859 
1860 	/*
1861 	 * Populate clk->parent if parent has already been __clk_init'd.  If
1862 	 * parent has not yet been __clk_init'd then place clk in the orphan
1863 	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
1864 	 * clk list.
1865 	 *
1866 	 * Every time a new clk is clk_init'd then we walk the list of orphan
1867 	 * clocks and re-parent any that are children of the clock currently
1868 	 * being clk_init'd.
1869 	 */
1870 	if (clk->parent)
1871 		hlist_add_head(&clk->child_node,
1872 				&clk->parent->children);
1873 	else if (clk->flags & CLK_IS_ROOT)
1874 		hlist_add_head(&clk->child_node, &clk_root_list);
1875 	else
1876 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1877 
1878 	/*
1879 	 * Set clk's accuracy.  The preferred method is to use
1880 	 * .recalc_accuracy. For simple clocks and lazy developers the default
1881 	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
1882 	 * parent (or is orphaned) then accuracy is set to zero (perfect
1883 	 * clock).
1884 	 */
1885 	if (clk->ops->recalc_accuracy)
1886 		clk->accuracy = clk->ops->recalc_accuracy(clk->hw,
1887 					__clk_get_accuracy(clk->parent));
1888 	else if (clk->parent)
1889 		clk->accuracy = clk->parent->accuracy;
1890 	else
1891 		clk->accuracy = 0;
1892 
1893 	/*
1894 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
1895 	 * simple clocks and lazy developers the default fallback is to use the
1896 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
1897 	 * then rate is set to zero.
1898 	 */
1899 	if (clk->ops->recalc_rate)
1900 		clk->rate = clk->ops->recalc_rate(clk->hw,
1901 				__clk_get_rate(clk->parent));
1902 	else if (clk->parent)
1903 		clk->rate = clk->parent->rate;
1904 	else
1905 		clk->rate = 0;
1906 
1907 	clk_debug_register(clk);
1908 	/*
1909 	 * walk the list of orphan clocks and reparent any that are children of
1910 	 * this clock
1911 	 */
1912 	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
1913 		if (orphan->num_parents && orphan->ops->get_parent) {
1914 			i = orphan->ops->get_parent(orphan->hw);
1915 			if (!strcmp(clk->name, orphan->parent_names[i]))
1916 				__clk_reparent(orphan, clk);
1917 			continue;
1918 		}
1919 
1920 		for (i = 0; i < orphan->num_parents; i++)
1921 			if (!strcmp(clk->name, orphan->parent_names[i])) {
1922 				__clk_reparent(orphan, clk);
1923 				break;
1924 			}
1925 	}
1926 
1927 	/*
1928 	 * optional platform-specific magic
1929 	 *
1930 	 * The .init callback is not used by any of the basic clock types, but
1931 	 * exists for weird hardware that must perform initialization magic.
1932 	 * Please consider other ways of solving initialization problems before
1933 	 * using this callback, as its use is discouraged.
1934 	 */
1935 	if (clk->ops->init)
1936 		clk->ops->init(clk->hw);
1937 
1938 	kref_init(&clk->ref);
1939 out:
1940 	clk_prepare_unlock();
1941 
1942 	return ret;
1943 }
1944 
1945 /**
1946  * __clk_register - register a clock and return a cookie.
1947  *
1948  * Same as clk_register, except that the .clk field inside hw shall point to a
1949  * preallocated (generally statically allocated) struct clk. None of the fields
1950  * of the struct clk need to be initialized.
1951  *
1952  * The data pointed to by the .init and .clk fields shall NOT be marked as init
1953  * data.
1954  *
1955  * __clk_register is only exposed via clk-private.h and is intended for use with
1956  * very large numbers of clocks that need to be statically initialized.  It is
1957  * a layering violation to include clk-private.h from any code which implements
1958  * a clock's .ops; as such any statically initialized clock data MUST be in a
1959  * separate C file from the logic that implements its operations.  Returns 0
1960  * on success, otherwise an error code.
1961  */
1962 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1963 {
1964 	int ret;
1965 	struct clk *clk;
1966 
1967 	clk = hw->clk;
1968 	clk->name = hw->init->name;
1969 	clk->ops = hw->init->ops;
1970 	clk->hw = hw;
1971 	clk->flags = hw->init->flags;
1972 	clk->parent_names = hw->init->parent_names;
1973 	clk->num_parents = hw->init->num_parents;
1974 	if (dev && dev->driver)
1975 		clk->owner = dev->driver->owner;
1976 	else
1977 		clk->owner = NULL;
1978 
1979 	ret = __clk_init(dev, clk);
1980 	if (ret)
1981 		return ERR_PTR(ret);
1982 
1983 	return clk;
1984 }
1985 EXPORT_SYMBOL_GPL(__clk_register);
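
/*
 * A minimal sketch of the static-initialization pattern described above.
 * It is illustrative only: the my_* identifiers are hypothetical and the
 * layout of struct clk comes from clk-private.h.
 *
 *	// data-only file, may include clk-private.h
 *	static struct clk my_static_clk;
 *	static const char *my_parent_names[] = { "osc" };
 *	static struct clk_init_data my_init = {
 *		.name		= "my_clk",
 *		.ops		= &my_clk_ops,	// implemented in a separate file
 *		.parent_names	= my_parent_names,
 *		.num_parents	= ARRAY_SIZE(my_parent_names),
 *	};
 *	static struct clk_hw my_hw = {
 *		.init	= &my_init,
 *		.clk	= &my_static_clk,
 *	};
 *
 *	// early init code
 *	clk = __clk_register(NULL, &my_hw);
 */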
1986 
1987 static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1988 {
1989 	int i, ret;
1990 
1991 	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1992 	if (!clk->name) {
1993 		pr_err("%s: could not allocate clk->name\n", __func__);
1994 		ret = -ENOMEM;
1995 		goto fail_name;
1996 	}
1997 	clk->ops = hw->init->ops;
1998 	if (dev && dev->driver)
1999 		clk->owner = dev->driver->owner;
2000 	clk->hw = hw;
2001 	clk->flags = hw->init->flags;
2002 	clk->num_parents = hw->init->num_parents;
2003 	hw->clk = clk;
2004 
2005 	/* allocate local copy in case parent_names is __initdata */
2006 	clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
2007 					GFP_KERNEL);
2008 
2009 	if (!clk->parent_names) {
2010 		pr_err("%s: could not allocate clk->parent_names\n", __func__);
2011 		ret = -ENOMEM;
2012 		goto fail_parent_names;
2013 	}
2014 
2015 
2016 	/* copy each string name in case parent_names is __initdata */
2017 	for (i = 0; i < clk->num_parents; i++) {
2018 		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
2019 						GFP_KERNEL);
2020 		if (!clk->parent_names[i]) {
2021 			pr_err("%s: could not copy parent_names\n", __func__);
2022 			ret = -ENOMEM;
2023 			goto fail_parent_names_copy;
2024 		}
2025 	}
2026 
2027 	ret = __clk_init(dev, clk);
2028 	if (!ret)
2029 		return 0;
2030 
2031 fail_parent_names_copy:
2032 	while (--i >= 0)
2033 		kfree(clk->parent_names[i]);
2034 	kfree(clk->parent_names);
2035 fail_parent_names:
2036 	kfree(clk->name);
2037 fail_name:
2038 	return ret;
2039 }
2040 
2041 /**
2042  * clk_register - allocate a new clock, register it and return an opaque cookie
2043  * @dev: device that is registering this clock
2044  * @hw: link to hardware-specific clock data
2045  *
2046  * clk_register is the primary interface for populating the clock tree with new
2047  * clock nodes.  It returns a pointer to the newly allocated struct clk which
2048  * cannot be dereferenced by driver code but may be used in conjunction with the
2049  * rest of the clock API.  In the event of an error clk_register will return an
2050  * error pointer; drivers must test for it with IS_ERR() after calling clk_register.
2051  */
2052 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2053 {
2054 	int ret;
2055 	struct clk *clk;
2056 
2057 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2058 	if (!clk) {
2059 		pr_err("%s: could not allocate clk\n", __func__);
2060 		ret = -ENOMEM;
2061 		goto fail_out;
2062 	}
2063 
2064 	ret = _clk_register(dev, hw, clk);
2065 	if (!ret)
2066 		return clk;
2067 
2068 	kfree(clk);
2069 fail_out:
2070 	return ERR_PTR(ret);
2071 }
2072 EXPORT_SYMBOL_GPL(clk_register);
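
/*
 * A hedged usage sketch for clk_register(); the my_* identifiers are
 * hypothetical and stand in for a driver's own hardware-clock wrapper:
 *
 *	static const char *parent_names[] = { "xtal" };
 *	struct clk_init_data init = {
 *		.name		= "my_gate",
 *		.ops		= &my_gate_ops,
 *		.parent_names	= parent_names,
 *		.num_parents	= ARRAY_SIZE(parent_names),
 *	};
 *	struct clk *clk;
 *
 *	my_gate->hw.init = &init;
 *	clk = clk_register(dev, &my_gate->hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 * The name and parent_names strings are duplicated by _clk_register(), so
 * they may live in initdata; the ops pointer is kept as-is and must not.
 */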
2073 
2074 /*
2075  * Free memory allocated for a clock.
2076  * Caller must hold prepare_lock.
2077  */
2078 static void __clk_release(struct kref *ref)
2079 {
2080 	struct clk *clk = container_of(ref, struct clk, ref);
2081 	int i = clk->num_parents;
2082 
2083 	kfree(clk->parents);
2084 	while (--i >= 0)
2085 		kfree(clk->parent_names[i]);
2086 
2087 	kfree(clk->parent_names);
2088 	kfree(clk->name);
2089 	kfree(clk);
2090 }
2091 
2092 /*
2093  * Empty clk_ops for unregistered clocks. These are used temporarily
2094  * after clk_unregister() was called on a clock and until the last clock
2095  * consumer calls clk_put() and the struct clk object is freed.
2096  */
2097 static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2098 {
2099 	return -ENXIO;
2100 }
2101 
2102 static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2103 {
2104 	WARN_ON_ONCE(1);
2105 }
2106 
2107 static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2108 					unsigned long parent_rate)
2109 {
2110 	return -ENXIO;
2111 }
2112 
2113 static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2114 {
2115 	return -ENXIO;
2116 }
2117 
2118 static const struct clk_ops clk_nodrv_ops = {
2119 	.enable		= clk_nodrv_prepare_enable,
2120 	.disable	= clk_nodrv_disable_unprepare,
2121 	.prepare	= clk_nodrv_prepare_enable,
2122 	.unprepare	= clk_nodrv_disable_unprepare,
2123 	.set_rate	= clk_nodrv_set_rate,
2124 	.set_parent	= clk_nodrv_set_parent,
2125 };
2126 
2127 /**
2128  * clk_unregister - unregister a currently registered clock
2129  * @clk: clock to unregister
2130  */
2131 void clk_unregister(struct clk *clk)
2132 {
2133 	unsigned long flags;
2134 
2135 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2136 		return;
2137 
2138 	clk_prepare_lock();
2139 
2140 	if (clk->ops == &clk_nodrv_ops) {
2141 		pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
2142 		goto out;
2143 	}
2144 	/*
2145 	 * Assign empty clock ops for consumers that might still hold
2146 	 * a reference to this clock.
2147 	 */
2148 	flags = clk_enable_lock();
2149 	clk->ops = &clk_nodrv_ops;
2150 	clk_enable_unlock(flags);
2151 
2152 	if (!hlist_empty(&clk->children)) {
2153 		struct clk *child;
2154 
2155 		/* Reparent all children to the orphan list. */
2156 		hlist_for_each_entry(child, &clk->children, child_node)
2157 			clk_set_parent(child, NULL);
2158 	}
2159 
2160 	clk_debug_unregister(clk);
2161 
2162 	hlist_del_init(&clk->child_node);
2163 
2164 	if (clk->prepare_count)
2165 		pr_warn("%s: unregistering prepared clock: %s\n",
2166 					__func__, clk->name);
2167 
2168 	kref_put(&clk->ref, __clk_release);
2169 out:
2170 	clk_prepare_unlock();
2171 }
2172 EXPORT_SYMBOL_GPL(clk_unregister);
2173 
2174 static void devm_clk_release(struct device *dev, void *res)
2175 {
2176 	clk_unregister(res);
2177 }
2178 
2179 /**
2180  * devm_clk_register - resource managed clk_register()
2181  * @dev: device that is registering this clock
2182  * @hw: link to hardware-specific clock data
2183  *
2184  * Managed clk_register(). Clocks returned from this function are
2185  * automatically clk_unregister()ed on driver detach. See clk_register() for
2186  * more information.
2187  */
2188 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2189 {
2190 	struct clk *clk;
2191 	int ret;
2192 
2193 	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
2194 	if (!clk)
2195 		return ERR_PTR(-ENOMEM);
2196 
2197 	ret = _clk_register(dev, hw, clk);
2198 	if (!ret) {
2199 		devres_add(dev, clk);
2200 	} else {
2201 		devres_free(clk);
2202 		clk = ERR_PTR(ret);
2203 	}
2204 
2205 	return clk;
2206 }
2207 EXPORT_SYMBOL_GPL(devm_clk_register);
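
/*
 * A hedged probe-time sketch (my_probe and priv are hypothetical); the
 * registered clock is unregistered automatically on driver detach:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct clk *clk;
 *
 *		// ... fill in priv->hw.init ...
 *		clk = devm_clk_register(&pdev->dev, &priv->hw);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		return of_clk_add_provider(pdev->dev.of_node,
 *					   of_clk_src_simple_get, clk);
 *	}
 */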
2208 
2209 static int devm_clk_match(struct device *dev, void *res, void *data)
2210 {
2211 	struct clk *c = res;
2212 	if (WARN_ON(!c))
2213 		return 0;
2214 	return c == data;
2215 }
2216 
2217 /**
2218  * devm_clk_unregister - resource managed clk_unregister()
2219  * @clk: clock to unregister
2220  *
2221  * Deallocate a clock allocated with devm_clk_register(). Normally
2222  * this function will not need to be called and the resource management
2223  * code will ensure that the resource is freed.
2224  */
2225 void devm_clk_unregister(struct device *dev, struct clk *clk)
2226 {
2227 	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2228 }
2229 EXPORT_SYMBOL_GPL(devm_clk_unregister);
2230 
2231 /*
2232  * clkdev helpers
2233  */
2234 int __clk_get(struct clk *clk)
2235 {
2236 	if (clk) {
2237 		if (!try_module_get(clk->owner))
2238 			return 0;
2239 
2240 		kref_get(&clk->ref);
2241 	}
2242 	return 1;
2243 }
2244 
2245 void __clk_put(struct clk *clk)
2246 {
2247 	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2248 		return;
2249 
2250 	clk_prepare_lock();
2251 	kref_put(&clk->ref, __clk_release);
2252 	clk_prepare_unlock();
2253 
2254 	module_put(clk->owner);
2255 }
2256 
2257 /***        clk rate change notifiers        ***/
2258 
2259 /**
2260  * clk_notifier_register - add a clk rate change notifier
2261  * @clk: struct clk * to watch
2262  * @nb: struct notifier_block * with callback info
2263  *
2264  * Request notification when clk's rate changes.  This uses an SRCU
2265  * notifier because we want it to block and notifier unregistrations are
2266  * uncommon.  The callbacks associated with the notifier must not
2267  * re-enter into the clk framework by calling any top-level clk APIs;
2268  * this would result in nested locking of the prepare_lock mutex.
2269  *
2270  * In all notification cases (pre, post and abort rate change) the
2271  * original clock rate is passed to the callback via struct
2272  * clk_notifier_data.old_rate and the new frequency is passed via struct
2273  * clk_notifier_data.new_rate.
2274  *
2275  * clk_notifier_register() must be called from non-atomic context.
2276  * Returns -EINVAL if called with null arguments, -ENOMEM upon
2277  * allocation failure; otherwise, passes along the return value of
2278  * srcu_notifier_chain_register().
2279  */
2280 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2281 {
2282 	struct clk_notifier *cn;
2283 	int ret = -ENOMEM;
2284 
2285 	if (!clk || !nb)
2286 		return -EINVAL;
2287 
2288 	clk_prepare_lock();
2289 
2290 	/* search the list of notifiers for this clk */
2291 	list_for_each_entry(cn, &clk_notifier_list, node)
2292 		if (cn->clk == clk)
2293 			break;
2294 
2295 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
2296 	if (cn->clk != clk) {
2297 		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2298 		if (!cn)
2299 			goto out;
2300 
2301 		cn->clk = clk;
2302 		srcu_init_notifier_head(&cn->notifier_head);
2303 
2304 		list_add(&cn->node, &clk_notifier_list);
2305 	}
2306 
2307 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2308 
2309 	clk->notifier_count++;
2310 
2311 out:
2312 	clk_prepare_unlock();
2313 
2314 	return ret;
2315 }
2316 EXPORT_SYMBOL_GPL(clk_notifier_register);
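
/*
 * A hedged sketch of a notifier callback (my_nb_cb and my_nb are
 * hypothetical); the event codes and struct clk_notifier_data come from
 * linux/clk.h:
 *
 *	static int my_nb_cb(struct notifier_block *nb, unsigned long event,
 *			    void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			// prepare for cnd->new_rate; NOTIFY_BAD aborts
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *			// cnd->old_rate and cnd->new_rate as described above
 *			return NOTIFY_OK;
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_nb_cb };
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */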
2317 
2318 /**
2319  * clk_notifier_unregister - remove a clk rate change notifier
2320  * @clk: struct clk *
2321  * @nb: struct notifier_block * with callback info
2322  *
2323  * Request no further notification for changes to 'clk' and free the memory
2324  * allocated in clk_notifier_register().
2325  *
2326  * Returns -EINVAL if called with null arguments; otherwise, passes
2327  * along the return value of srcu_notifier_chain_unregister().
2328  */
2329 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2330 {
2331 	struct clk_notifier *cn = NULL;
2332 	int ret = -EINVAL;
2333 
2334 	if (!clk || !nb)
2335 		return -EINVAL;
2336 
2337 	clk_prepare_lock();
2338 
2339 	list_for_each_entry(cn, &clk_notifier_list, node)
2340 		if (cn->clk == clk)
2341 			break;
2342 
2343 	if (cn->clk == clk) {
2344 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2345 
2346 		clk->notifier_count--;
2347 
2348 		/* XXX the notifier code should handle this better */
2349 		if (!cn->notifier_head.head) {
2350 			srcu_cleanup_notifier_head(&cn->notifier_head);
2351 			list_del(&cn->node);
2352 			kfree(cn);
2353 		}
2354 
2355 	} else {
2356 		ret = -ENOENT;
2357 	}
2358 
2359 	clk_prepare_unlock();
2360 
2361 	return ret;
2362 }
2363 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
2364 
2365 #ifdef CONFIG_OF
2366 /**
2367  * struct of_clk_provider - Clock provider registration structure
2368  * @link: Entry in global list of clock providers
2369  * @node: Pointer to device tree node of clock provider
2370  * @get: Get clock callback.  Returns NULL or a struct clk for the
2371  *       given clock specifier
2372  * @data: context pointer to be passed into @get callback
2373  */
2374 struct of_clk_provider {
2375 	struct list_head link;
2376 
2377 	struct device_node *node;
2378 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2379 	void *data;
2380 };
2381 
2382 static const struct of_device_id __clk_of_table_sentinel
2383 	__used __section(__clk_of_table_end);
2384 
2385 static LIST_HEAD(of_clk_providers);
2386 static DEFINE_MUTEX(of_clk_mutex);
2387 
2388 /* of_clk_provider list locking helpers */
2389 void of_clk_lock(void)
2390 {
2391 	mutex_lock(&of_clk_mutex);
2392 }
2393 
2394 void of_clk_unlock(void)
2395 {
2396 	mutex_unlock(&of_clk_mutex);
2397 }
2398 
2399 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2400 				     void *data)
2401 {
2402 	return data;
2403 }
2404 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2405 
2406 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2407 {
2408 	struct clk_onecell_data *clk_data = data;
2409 	unsigned int idx = clkspec->args[0];
2410 
2411 	if (idx >= clk_data->clk_num) {
2412 		pr_err("%s: invalid clock index %u\n", __func__, idx);
2413 		return ERR_PTR(-EINVAL);
2414 	}
2415 
2416 	return clk_data->clks[idx];
2417 }
2418 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2419 
2420 /**
2421  * of_clk_add_provider() - Register a clock provider for a node
2422  * @np: Device node pointer associated with clock provider
2423  * @clk_src_get: callback for decoding clock
2424  * @data: context pointer for @clk_src_get callback.
2425  */
2426 int of_clk_add_provider(struct device_node *np,
2427 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2428 						   void *data),
2429 			void *data)
2430 {
2431 	struct of_clk_provider *cp;
2432 
2433 	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2434 	if (!cp)
2435 		return -ENOMEM;
2436 
2437 	cp->node = of_node_get(np);
2438 	cp->data = data;
2439 	cp->get = clk_src_get;
2440 
2441 	mutex_lock(&of_clk_mutex);
2442 	list_add(&cp->link, &of_clk_providers);
2443 	mutex_unlock(&of_clk_mutex);
2444 	pr_debug("Added clock from %s\n", np->full_name);
2445 
2446 	return 0;
2447 }
2448 EXPORT_SYMBOL_GPL(of_clk_add_provider);
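
/*
 * A hedged sketch of registering a multi-output provider with the onecell
 * helper above (MY_NR_CLKS and clks[] are hypothetical):
 *
 *	static struct clk *clks[MY_NR_CLKS];
 *	static struct clk_onecell_data clk_data = {
 *		.clks		= clks,
 *		.clk_num	= ARRAY_SIZE(clks),
 *	};
 *
 *	// after filling clks[] via clk_register() or similar:
 *	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 *
 * Consumers then reference individual outputs with a one-cell specifier,
 * e.g. clocks = <&myclks 3>, which of_clk_src_onecell_get() turns into
 * clk_data.clks[3].
 */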
2449 
2450 /**
2451  * of_clk_del_provider() - Remove a previously registered clock provider
2452  * @np: Device node pointer associated with clock provider
2453  */
2454 void of_clk_del_provider(struct device_node *np)
2455 {
2456 	struct of_clk_provider *cp;
2457 
2458 	mutex_lock(&of_clk_mutex);
2459 	list_for_each_entry(cp, &of_clk_providers, link) {
2460 		if (cp->node == np) {
2461 			list_del(&cp->link);
2462 			of_node_put(cp->node);
2463 			kfree(cp);
2464 			break;
2465 		}
2466 	}
2467 	mutex_unlock(&of_clk_mutex);
2468 }
2469 EXPORT_SYMBOL_GPL(of_clk_del_provider);
2470 
2471 struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
2472 {
2473 	struct of_clk_provider *provider;
2474 	struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2475 
2476 	/* Check if we have such a provider in our list */
2477 	list_for_each_entry(provider, &of_clk_providers, link) {
2478 		if (provider->node == clkspec->np)
2479 			clk = provider->get(clkspec, provider->data);
2480 		if (!IS_ERR(clk))
2481 			break;
2482 	}
2483 
2484 	return clk;
2485 }
2486 
2487 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2488 {
2489 	struct clk *clk;
2490 
2491 	mutex_lock(&of_clk_mutex);
2492 	clk = __of_clk_get_from_provider(clkspec);
2493 	mutex_unlock(&of_clk_mutex);
2494 
2495 	return clk;
2496 }
2497 
2498 int of_clk_get_parent_count(struct device_node *np)
2499 {
2500 	return of_count_phandle_with_args(np, "clocks", "#clock-cells");
2501 }
2502 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
2503 
2504 const char *of_clk_get_parent_name(struct device_node *np, int index)
2505 {
2506 	struct of_phandle_args clkspec;
2507 	struct property *prop;
2508 	const char *clk_name;
2509 	const __be32 *vp;
2510 	u32 pv;
2511 	int rc;
2512 	int count;
2513 
2514 	if (index < 0)
2515 		return NULL;
2516 
2517 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
2518 					&clkspec);
2519 	if (rc)
2520 		return NULL;
2521 
2522 	index = clkspec.args_count ? clkspec.args[0] : 0;
2523 	count = 0;
2524 
2525 	/* if there is a clock-indices property, use it to translate the index
2526 	 * from the specifier into an offset into the clock-output-names array.
2527 	 */
2528 	of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
2529 		if (index == pv) {
2530 			index = count;
2531 			break;
2532 		}
2533 		count++;
2534 	}
2535 
2536 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
2537 					  index,
2538 					  &clk_name) < 0)
2539 		clk_name = clkspec.np->name;
2540 
2541 	of_node_put(clkspec.np);
2542 	return clk_name;
2543 }
2544 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
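
/*
 * An illustrative (made-up) device tree fragment showing how the optional
 * clock-indices property remaps specifier indices onto clock-output-names:
 *
 *	clkctrl: clock-controller {
 *		#clock-cells = <1>;
 *		clock-indices = <1>, <3>;
 *		clock-output-names = "clka", "clkb";
 *	};
 *
 * A consumer using <&clkctrl 3> resolves to the name "clkb": index 3 is
 * found at offset 1 in clock-indices, so offset 1 of clock-output-names
 * is used.
 */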
2545 
2546 struct clock_provider {
2547 	of_clk_init_cb_t clk_init_cb;
2548 	struct device_node *np;
2549 	struct list_head node;
2550 };
2551 
2552 static LIST_HEAD(clk_provider_list);
2553 
2554 /*
2555  * This function checks, for each parent clock of @np, whether the
2556  * provider of that parent has already been initialized; if so, the
2557  * parent clocks are ready.
2558  */
2559 static int parent_ready(struct device_node *np)
2560 {
2561 	int i = 0;
2562 
2563 	while (true) {
2564 		struct clk *clk = of_clk_get(np, i);
2565 
2566 		/* this parent is ready, we can check the next one */
2567 		if (!IS_ERR(clk)) {
2568 			clk_put(clk);
2569 			i++;
2570 			continue;
2571 		}
2572 
2573 		/* at least one parent is not ready, we exit now */
2574 		if (PTR_ERR(clk) == -EPROBE_DEFER)
2575 			return 0;
2576 
2577 		/*
2578 		 * Here we assume that the device tree is written
2579 		 * correctly, so any other error means that there are
2580 		 * no more parents.  As we did not exit above, all
2581 		 * previous parents are ready.  If there are no clock
2582 		 * parents at all there is nothing to wait for, so we
2583 		 * can consider their absence as being ready.
2584 		 */
2585 		return 1;
2586 	}
2587 }
2588 
2589 /**
2590  * of_clk_init() - Scan and init clock providers from the DT
2591  * @matches: array of compatible values and init functions for providers.
2592  *
2593  * This function scans the device tree for matching clock providers
2594  * and calls their initialization functions, attempting to do so in
2595  * dependency order (parents before children).
2596  */
2597 void __init of_clk_init(const struct of_device_id *matches)
2598 {
2599 	const struct of_device_id *match;
2600 	struct device_node *np;
2601 	struct clock_provider *clk_provider, *next;
2602 	bool is_init_done;
2603 	bool force = false;
2604 
2605 	if (!matches)
2606 		matches = &__clk_of_table;
2607 
2608 	/* First prepare the list of the clock providers */
2609 	for_each_matching_node_and_match(np, matches, &match) {
2610 		struct clock_provider *parent =
2611 			kzalloc(sizeof(struct clock_provider), GFP_KERNEL);
2612 
2613 		parent->clk_init_cb = match->data;
2614 		parent->np = np;
2615 		list_add_tail(&parent->node, &clk_provider_list);
2616 	}
2617 
2618 	while (!list_empty(&clk_provider_list)) {
2619 		is_init_done = false;
2620 		list_for_each_entry_safe(clk_provider, next,
2621 					&clk_provider_list, node) {
2622 			if (force || parent_ready(clk_provider->np)) {
2623 				clk_provider->clk_init_cb(clk_provider->np);
2624 				list_del(&clk_provider->node);
2625 				kfree(clk_provider);
2626 				is_init_done = true;
2627 			}
2628 		}
2629 
2630 		/*
2631 		 * We didn't manage to initialize any of the
2632 		 * remaining providers during the last pass, so now
2633 		 * initialize all the remaining ones unconditionally
2634 		 * in case their clock parents are not mandatory.
2635 		 */
2636 		if (!is_init_done)
2637 			force = true;
2638 
2639 	}
2640 }
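
/*
 * Providers normally end up in the match table consumed by of_clk_init()
 * via CLK_OF_DECLARE(); a hedged sketch (the compatible string and names
 * are illustrative):
 *
 *	static void __init my_clk_setup(struct device_node *np)
 *	{
 *		// register clocks for this node, then publish them:
 *		of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 *	}
 *	CLK_OF_DECLARE(my_clk, "vendor,my-clock", my_clk_setup);
 */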
2641 #endif
2642