xref: /openbmc/linux/drivers/clk/clk.c (revision 9d749629)
1 /*
2  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * Standard functionality for the common clock API.  See Documentation/clk.txt
10  */
11 
12 #include <linux/clk-private.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/spinlock.h>
16 #include <linux/err.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/device.h>
21 #include <linux/init.h>
22 
23 static DEFINE_SPINLOCK(enable_lock);
24 static DEFINE_MUTEX(prepare_lock);
25 
26 static HLIST_HEAD(clk_root_list);
27 static HLIST_HEAD(clk_orphan_list);
28 static LIST_HEAD(clk_notifier_list);
29 
30 /***        debugfs support        ***/
31 
32 #ifdef CONFIG_COMMON_CLK_DEBUG
33 #include <linux/debugfs.h>
34 
35 static struct dentry *rootdir;
36 static struct dentry *orphandir;
37 static int inited;
38 
39 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
40 {
41 	if (!c)
42 		return;
43 
44 	seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
45 		   level * 3 + 1, "",
46 		   30 - level * 3, c->name,
47 		   c->enable_count, c->prepare_count, c->rate);
48 	seq_printf(s, "\n");
49 }
50 
51 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
52 				     int level)
53 {
54 	struct clk *child;
55 	struct hlist_node *tmp;
56 
57 	if (!c)
58 		return;
59 
60 	clk_summary_show_one(s, c, level);
61 
62 	hlist_for_each_entry(child, tmp, &c->children, child_node)
63 		clk_summary_show_subtree(s, child, level + 1);
64 }
65 
66 static int clk_summary_show(struct seq_file *s, void *data)
67 {
68 	struct clk *c;
69 	struct hlist_node *tmp;
70 
71 	seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
72 	seq_printf(s, "---------------------------------------------------------------------\n");
73 
74 	mutex_lock(&prepare_lock);
75 
76 	hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
77 		clk_summary_show_subtree(s, c, 0);
78 
79 	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
80 		clk_summary_show_subtree(s, c, 0);
81 
82 	mutex_unlock(&prepare_lock);
83 
84 	return 0;
85 }
86 
87 
88 static int clk_summary_open(struct inode *inode, struct file *file)
89 {
90 	return single_open(file, clk_summary_show, inode->i_private);
91 }
92 
93 static const struct file_operations clk_summary_fops = {
94 	.open		= clk_summary_open,
95 	.read		= seq_read,
96 	.llseek		= seq_lseek,
97 	.release	= single_release,
98 };
99 
100 static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
101 {
102 	if (!c)
103 		return;
104 
105 	seq_printf(s, "\"%s\": { ", c->name);
106 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
107 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
108 	seq_printf(s, "\"rate\": %lu", c->rate);
109 }
110 
111 static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
112 {
113 	struct clk *child;
114 	struct hlist_node *tmp;
115 
116 	if (!c)
117 		return;
118 
119 	clk_dump_one(s, c, level);
120 
121 	hlist_for_each_entry(child, tmp, &c->children, child_node) {
122 		seq_printf(s, ",");
123 		clk_dump_subtree(s, child, level + 1);
124 	}
125 
126 	seq_printf(s, "}");
127 }
128 
129 static int clk_dump(struct seq_file *s, void *data)
130 {
131 	struct clk *c;
132 	struct hlist_node *tmp;
133 	bool first_node = true;
134 
135 	seq_printf(s, "{");
136 
137 	mutex_lock(&prepare_lock);
138 
139 	hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
140 		if (!first_node)
141 			seq_printf(s, ",");
142 		first_node = false;
143 		clk_dump_subtree(s, c, 0);
144 	}
145 
146 	hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
147 		seq_printf(s, ",");
148 		clk_dump_subtree(s, c, 0);
149 	}
150 
151 	mutex_unlock(&prepare_lock);
152 
153 	seq_printf(s, "}");
154 	return 0;
155 }
156 
157 
158 static int clk_dump_open(struct inode *inode, struct file *file)
159 {
160 	return single_open(file, clk_dump, inode->i_private);
161 }
162 
163 static const struct file_operations clk_dump_fops = {
164 	.open		= clk_dump_open,
165 	.read		= seq_read,
166 	.llseek		= seq_lseek,
167 	.release	= single_release,
168 };
169 
170 /* caller must hold prepare_lock */
171 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
172 {
173 	struct dentry *d;
174 	int ret = -ENOMEM;
175 
176 	if (!clk || !pdentry) {
177 		ret = -EINVAL;
178 		goto out;
179 	}
180 
181 	d = debugfs_create_dir(clk->name, pdentry);
182 	if (!d)
183 		goto out;
184 
185 	clk->dentry = d;
186 
187 	d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
188 			(u32 *)&clk->rate);
189 	if (!d)
190 		goto err_out;
191 
192 	d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
193 			(u32 *)&clk->flags);
194 	if (!d)
195 		goto err_out;
196 
197 	d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
198 			(u32 *)&clk->prepare_count);
199 	if (!d)
200 		goto err_out;
201 
202 	d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
203 			(u32 *)&clk->enable_count);
204 	if (!d)
205 		goto err_out;
206 
207 	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
208 			(u32 *)&clk->notifier_count);
209 	if (!d)
210 		goto err_out;
211 
212 	ret = 0;
213 	goto out;
214 
215 err_out:
216 	debugfs_remove(clk->dentry);
217 out:
218 	return ret;
219 }
220 
221 /* caller must hold prepare_lock */
222 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
223 {
224 	struct clk *child;
225 	struct hlist_node *tmp;
226 	int ret = -EINVAL;
227 
228 	if (!clk || !pdentry)
229 		goto out;
230 
231 	ret = clk_debug_create_one(clk, pdentry);
232 
233 	if (ret)
234 		goto out;
235 
236 	hlist_for_each_entry(child, tmp, &clk->children, child_node)
237 		clk_debug_create_subtree(child, clk->dentry);
238 
239 	ret = 0;
240 out:
241 	return ret;
242 }
243 
244 /**
245  * clk_debug_register - add a clk node to the debugfs clk tree
246  * @clk: the clk being added to the debugfs clk tree
247  *
248  * Dynamically adds a clk to the debugfs clk tree if debugfs has been
249  * initialized.  Otherwise it bails out early since the debugfs clk tree
250  * will be created lazily by clk_debug_init as part of a late_initcall.
251  *
252  * Caller must hold prepare_lock.  Only clk_init calls this function (so
253  * far) so this is taken care of.
254  */
255 static int clk_debug_register(struct clk *clk)
256 {
257 	struct clk *parent;
258 	struct dentry *pdentry;
259 	int ret = 0;
260 
261 	if (!inited)
262 		goto out;
263 
264 	parent = clk->parent;
265 
266 	/*
267 	 * Check to see if a clk is a root clk.  Also check that it is
268 	 * safe to add this clk to debugfs
269 	 */
270 	if (!parent) {
271 		if (clk->flags & CLK_IS_ROOT)
272 			pdentry = rootdir;
273 		else
274 			pdentry = orphandir;
275 	} else if (parent->dentry) {
276 		pdentry = parent->dentry;
277 	} else {
278 		goto out;
279 	}
280 
281 	ret = clk_debug_create_subtree(clk, pdentry);
282 
283 out:
284 	return ret;
285 }
286 
287 /**
288  * clk_debug_init - lazily create the debugfs clk tree visualization
289  *
290  * clks are often initialized very early during boot before memory can
291  * be dynamically allocated and well before debugfs is setup.
292  * clk_debug_init walks the clk tree hierarchy while holding
293  * prepare_lock and creates the topology as part of a late_initcall,
294  * thus ensuring that clks initialized very early will still be
295  * represented in the debugfs clk tree.  This function should only be
296  * called once at boot-time; clks added dynamically afterward will be
297  * registered via clk_debug_register.
298  */
299 static int __init clk_debug_init(void)
300 {
301 	struct clk *clk;
302 	struct hlist_node *tmp;
303 	struct dentry *d;
304 
305 	rootdir = debugfs_create_dir("clk", NULL);
306 
307 	if (!rootdir)
308 		return -ENOMEM;
309 
310 	d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
311 				&clk_summary_fops);
312 	if (!d)
313 		return -ENOMEM;
314 
315 	d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
316 				&clk_dump_fops);
317 	if (!d)
318 		return -ENOMEM;
319 
320 	orphandir = debugfs_create_dir("orphans", rootdir);
321 
322 	if (!orphandir)
323 		return -ENOMEM;
324 
325 	mutex_lock(&prepare_lock);
326 
327 	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
328 		clk_debug_create_subtree(clk, rootdir);
329 
330 	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
331 		clk_debug_create_subtree(clk, orphandir);
332 
333 	inited = 1;
334 
335 	mutex_unlock(&prepare_lock);
336 
337 	return 0;
338 }
339 late_initcall(clk_debug_init);
340 #else
341 static inline int clk_debug_register(struct clk *clk) { return 0; }
342 #endif
343 
344 /* caller must hold prepare_lock */
345 static void clk_disable_unused_subtree(struct clk *clk)
346 {
347 	struct clk *child;
348 	struct hlist_node *tmp;
349 	unsigned long flags;
350 
351 	if (!clk)
352 		goto out;
353 
354 	hlist_for_each_entry(child, tmp, &clk->children, child_node)
355 		clk_disable_unused_subtree(child);
356 
357 	spin_lock_irqsave(&enable_lock, flags);
358 
359 	if (clk->enable_count)
360 		goto unlock_out;
361 
362 	if (clk->flags & CLK_IGNORE_UNUSED)
363 		goto unlock_out;
364 
365 	/*
366 	 * some gate clocks have special needs during the disable-unused
367 	 * sequence.  call .disable_unused if available, otherwise fall
368 	 * back to .disable
369 	 */
370 	if (__clk_is_enabled(clk)) {
371 		if (clk->ops->disable_unused)
372 			clk->ops->disable_unused(clk->hw);
373 		else if (clk->ops->disable)
374 			clk->ops->disable(clk->hw);
375 	}
376 
377 unlock_out:
378 	spin_unlock_irqrestore(&enable_lock, flags);
379 
380 out:
381 	return;
382 }
383 
384 static int clk_disable_unused(void)
385 {
386 	struct clk *clk;
387 	struct hlist_node *tmp;
388 
389 	mutex_lock(&prepare_lock);
390 
391 	hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
392 		clk_disable_unused_subtree(clk);
393 
394 	hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
395 		clk_disable_unused_subtree(clk);
396 
397 	mutex_unlock(&prepare_lock);
398 
399 	return 0;
400 }
401 late_initcall(clk_disable_unused);
402 
403 /***    helper functions   ***/
404 
405 const char *__clk_get_name(struct clk *clk)
406 {
407 	return !clk ? NULL : clk->name;
408 }
409 EXPORT_SYMBOL_GPL(__clk_get_name);
410 
411 struct clk_hw *__clk_get_hw(struct clk *clk)
412 {
413 	return !clk ? NULL : clk->hw;
414 }
415 
416 u8 __clk_get_num_parents(struct clk *clk)
417 {
418 	return !clk ? 0 : clk->num_parents;
419 }
420 
421 struct clk *__clk_get_parent(struct clk *clk)
422 {
423 	return !clk ? NULL : clk->parent;
424 }
425 
426 unsigned int __clk_get_enable_count(struct clk *clk)
427 {
428 	return !clk ? 0 : clk->enable_count;
429 }
430 
431 unsigned int __clk_get_prepare_count(struct clk *clk)
432 {
433 	return !clk ? 0 : clk->prepare_count;
434 }
435 
436 unsigned long __clk_get_rate(struct clk *clk)
437 {
438 	unsigned long ret;
439 
440 	if (!clk) {
441 		ret = 0;
442 		goto out;
443 	}
444 
445 	ret = clk->rate;
446 
447 	if (clk->flags & CLK_IS_ROOT)
448 		goto out;
449 
450 	if (!clk->parent)
451 		ret = 0;
452 
453 out:
454 	return ret;
455 }
456 
457 unsigned long __clk_get_flags(struct clk *clk)
458 {
459 	return !clk ? 0 : clk->flags;
460 }
461 
462 bool __clk_is_enabled(struct clk *clk)
463 {
464 	int ret;
465 
466 	if (!clk)
467 		return false;
468 
469 	/*
470 	 * .is_enabled is only mandatory for clocks that gate; fall back to
471 	 * the software usage counter if .is_enabled is missing
472 	 */
473 	if (!clk->ops->is_enabled) {
474 		ret = clk->enable_count ? 1 : 0;
475 		goto out;
476 	}
477 
478 	ret = clk->ops->is_enabled(clk->hw);
479 out:
480 	return !!ret;
481 }
482 
483 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
484 {
485 	struct clk *child;
486 	struct clk *ret;
487 	struct hlist_node *tmp;
488 
489 	if (!strcmp(clk->name, name))
490 		return clk;
491 
492 	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
493 		ret = __clk_lookup_subtree(name, child);
494 		if (ret)
495 			return ret;
496 	}
497 
498 	return NULL;
499 }
500 
501 struct clk *__clk_lookup(const char *name)
502 {
503 	struct clk *root_clk;
504 	struct clk *ret;
505 	struct hlist_node *tmp;
506 
507 	if (!name)
508 		return NULL;
509 
510 	/* search the 'proper' clk tree first */
511 	hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
512 		ret = __clk_lookup_subtree(name, root_clk);
513 		if (ret)
514 			return ret;
515 	}
516 
517 	/* if not found, then search the orphan tree */
518 	hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
519 		ret = __clk_lookup_subtree(name, root_clk);
520 		if (ret)
521 			return ret;
522 	}
523 
524 	return NULL;
525 }
526 
527 /***        clk api        ***/
528 
529 void __clk_unprepare(struct clk *clk)
530 {
531 	if (!clk)
532 		return;
533 
534 	if (WARN_ON(clk->prepare_count == 0))
535 		return;
536 
537 	if (--clk->prepare_count > 0)
538 		return;
539 
540 	WARN_ON(clk->enable_count > 0);
541 
542 	if (clk->ops->unprepare)
543 		clk->ops->unprepare(clk->hw);
544 
545 	__clk_unprepare(clk->parent);
546 }
547 
548 /**
549  * clk_unprepare - undo preparation of a clock source
550  * @clk: the clk being unprepared
551  *
552  * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
553  * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
554  * if the operation may sleep.  One example is a clk which is accessed over
555  * I2C.  In the complex case a clk gate operation may require a fast and a slow
556  * part.  It is for this reason that clk_unprepare and clk_disable are not mutually
557  * exclusive.  In fact clk_disable must be called before clk_unprepare.
558  */
559 void clk_unprepare(struct clk *clk)
560 {
561 	mutex_lock(&prepare_lock);
562 	__clk_unprepare(clk);
563 	mutex_unlock(&prepare_lock);
564 }
565 EXPORT_SYMBOL_GPL(clk_unprepare);
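
/*
 * Illustrative sketch, not part of the original file: per the comment
 * above, clk_disable() must precede clk_unprepare() when fully gating a
 * clock.  The foo_* names are hypothetical.  The clk_disable_unprepare()
 * helper in <linux/clk.h> performs these same two calls in this order.
 */
#if 0	/* example only */
static void foo_stop(struct foo_dev *foo)
{
	/* fast, non-sleeping half of the gate sequence */
	clk_disable(foo->clk);

	/* slow half; may sleep, e.g. for a clock controlled over I2C */
	clk_unprepare(foo->clk);
}
#endif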
566 
567 int __clk_prepare(struct clk *clk)
568 {
569 	int ret = 0;
570 
571 	if (!clk)
572 		return 0;
573 
574 	if (clk->prepare_count == 0) {
575 		ret = __clk_prepare(clk->parent);
576 		if (ret)
577 			return ret;
578 
579 		if (clk->ops->prepare) {
580 			ret = clk->ops->prepare(clk->hw);
581 			if (ret) {
582 				__clk_unprepare(clk->parent);
583 				return ret;
584 			}
585 		}
586 	}
587 
588 	clk->prepare_count++;
589 
590 	return 0;
591 }
592 
593 /**
594  * clk_prepare - prepare a clock source
595  * @clk: the clk being prepared
596  *
597  * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
598  * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
599  * operation may sleep.  One example is a clk which is accessed over I2C.  In
600  * the complex case a clk ungate operation may require a fast and a slow part.
601  * It is for this reason that clk_prepare and clk_enable are not mutually
602  * exclusive.  In fact clk_prepare must be called before clk_enable.
603  * Returns 0 on success, -EERROR otherwise.
604  */
605 int clk_prepare(struct clk *clk)
606 {
607 	int ret;
608 
609 	mutex_lock(&prepare_lock);
610 	ret = __clk_prepare(clk);
611 	mutex_unlock(&prepare_lock);
612 
613 	return ret;
614 }
615 EXPORT_SYMBOL_GPL(clk_prepare);
616 
617 static void __clk_disable(struct clk *clk)
618 {
619 	if (!clk)
620 		return;
621 
622 	if (WARN_ON(IS_ERR(clk)))
623 		return;
624 
625 	if (WARN_ON(clk->enable_count == 0))
626 		return;
627 
628 	if (--clk->enable_count > 0)
629 		return;
630 
631 	if (clk->ops->disable)
632 		clk->ops->disable(clk->hw);
633 
634 	__clk_disable(clk->parent);
635 }
636 
637 /**
638  * clk_disable - gate a clock
639  * @clk: the clk being gated
640  *
641  * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
642  * a simple case, clk_disable can be used instead of clk_unprepare to gate a
643  * clk if the operation is fast and will never sleep.  One example is a
644  * SoC-internal clk which is controlled via simple register writes.  In the
645  * complex case a clk gate operation may require a fast and a slow part.  It is
646  * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
647  * In fact clk_disable must be called before clk_unprepare.
648  */
649 void clk_disable(struct clk *clk)
650 {
651 	unsigned long flags;
652 
653 	spin_lock_irqsave(&enable_lock, flags);
654 	__clk_disable(clk);
655 	spin_unlock_irqrestore(&enable_lock, flags);
656 }
657 EXPORT_SYMBOL_GPL(clk_disable);
658 
659 static int __clk_enable(struct clk *clk)
660 {
661 	int ret = 0;
662 
663 	if (!clk)
664 		return 0;
665 
666 	if (WARN_ON(clk->prepare_count == 0))
667 		return -ESHUTDOWN;
668 
669 	if (clk->enable_count == 0) {
670 		ret = __clk_enable(clk->parent);
671 
672 		if (ret)
673 			return ret;
674 
675 		if (clk->ops->enable) {
676 			ret = clk->ops->enable(clk->hw);
677 			if (ret) {
678 				__clk_disable(clk->parent);
679 				return ret;
680 			}
681 		}
682 	}
683 
684 	clk->enable_count++;
685 	return 0;
686 }
687 
688 /**
689  * clk_enable - ungate a clock
690  * @clk: the clk being ungated
691  *
692  * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
693  * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
694  * if the operation will never sleep.  One example is a SoC-internal clk which
695  * is controlled via simple register writes.  In the complex case a clk ungate
696  * operation may require a fast and a slow part.  It is for this reason that
697  * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
698  * must be called before clk_enable.  Returns 0 on success, -EERROR
699  * otherwise.
700  */
701 int clk_enable(struct clk *clk)
702 {
703 	unsigned long flags;
704 	int ret;
705 
706 	spin_lock_irqsave(&enable_lock, flags);
707 	ret = __clk_enable(clk);
708 	spin_unlock_irqrestore(&enable_lock, flags);
709 
710 	return ret;
711 }
712 EXPORT_SYMBOL_GPL(clk_enable);
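
/*
 * Illustrative sketch (hypothetical foo_* names): the ungate sequence
 * mirrors the gate sequence above.  clk_prepare() may sleep and must be
 * called first, from process context; clk_enable() may then be called
 * from atomic context.  The clk_prepare_enable() helper in
 * <linux/clk.h> combines both steps.
 */
#if 0	/* example only */
static int foo_start(struct foo_dev *foo)
{
	int ret;

	/* may sleep: call from process context */
	ret = clk_prepare(foo->clk);
	if (ret)
		return ret;

	/* atomic-safe: may be called under a spinlock or from IRQ context */
	ret = clk_enable(foo->clk);
	if (ret)
		clk_unprepare(foo->clk);

	return ret;
}
#endif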
713 
714 /**
715  * __clk_round_rate - round the given rate for a clk
716  * @clk: round the rate of this clock
717  *
718  * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
719  */
720 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
721 {
722 	unsigned long parent_rate = 0;
723 
724 	if (!clk)
725 		return 0;
726 
727 	if (!clk->ops->round_rate) {
728 		if (clk->flags & CLK_SET_RATE_PARENT)
729 			return __clk_round_rate(clk->parent, rate);
730 		else
731 			return clk->rate;
732 	}
733 
734 	if (clk->parent)
735 		parent_rate = clk->parent->rate;
736 
737 	return clk->ops->round_rate(clk->hw, rate, &parent_rate);
738 }
739 
740 /**
741  * clk_round_rate - round the given rate for a clk
742  * @clk: the clk for which we are rounding a rate
743  * @rate: the rate which is to be rounded
744  *
745  * Takes in a rate as input and rounds it to a rate that the clk can actually
746  * use which is then returned.  If clk does not implement .round_rate, the
747  * cached rate (or, with CLK_SET_RATE_PARENT, the parent's rounding) is returned.
748  */
749 long clk_round_rate(struct clk *clk, unsigned long rate)
750 {
751 	unsigned long ret;
752 
753 	mutex_lock(&prepare_lock);
754 	ret = __clk_round_rate(clk, rate);
755 	mutex_unlock(&prepare_lock);
756 
757 	return ret;
758 }
759 EXPORT_SYMBOL_GPL(clk_round_rate);
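
/*
 * Illustrative sketch (hypothetical foo_* names): probing the rate the
 * hardware can actually deliver before committing to it with
 * clk_set_rate(), and rejecting results more than 1% off target.
 */
#if 0	/* example only */
static int foo_pick_rate(struct clk *clk, unsigned long target)
{
	long rounded = clk_round_rate(clk, target);

	if (rounded <= 0)
		return -EINVAL;

	/* only commit if the achievable rate is within 1% of the target */
	if (abs(rounded - (long)target) > (long)(target / 100))
		return -ERANGE;

	return clk_set_rate(clk, rounded);
}
#endif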
760 
761 /**
762  * __clk_notify - call clk notifier chain
763  * @clk: struct clk * that is changing rate
764  * @msg: clk notifier type (see include/linux/clk.h)
765  * @old_rate: old clk rate
766  * @new_rate: new clk rate
767  *
768  * Triggers a notifier call chain on the clk rate-change notification
769  * for 'clk'.  Passes a pointer to the struct clk and the previous
770  * and current rates to the notifier callback.  Intended to be called by
771  * internal clock code only.  Returns NOTIFY_DONE from the last driver
772  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
773  * a driver returns that.
774  */
775 static int __clk_notify(struct clk *clk, unsigned long msg,
776 		unsigned long old_rate, unsigned long new_rate)
777 {
778 	struct clk_notifier *cn;
779 	struct clk_notifier_data cnd;
780 	int ret = NOTIFY_DONE;
781 
782 	cnd.clk = clk;
783 	cnd.old_rate = old_rate;
784 	cnd.new_rate = new_rate;
785 
786 	list_for_each_entry(cn, &clk_notifier_list, node) {
787 		if (cn->clk == clk) {
788 			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
789 					&cnd);
790 			break;
791 		}
792 	}
793 
794 	return ret;
795 }
796 
797 /**
798  * __clk_recalc_rates
799  * @clk: first clk in the subtree
800  * @msg: notification type (see include/linux/clk.h)
801  *
802  * Walks the subtree of clks starting with clk and recalculates rates as it
803  * goes.  Note that if a clk does not implement the .recalc_rate callback then
804  * it is assumed that the clock will take on the rate of its parent.
805  *
806  * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
807  * if necessary.
808  *
809  * Caller must hold prepare_lock.
810  */
811 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
812 {
813 	unsigned long old_rate;
814 	unsigned long parent_rate = 0;
815 	struct hlist_node *tmp;
816 	struct clk *child;
817 
818 	old_rate = clk->rate;
819 
820 	if (clk->parent)
821 		parent_rate = clk->parent->rate;
822 
823 	if (clk->ops->recalc_rate)
824 		clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
825 	else
826 		clk->rate = parent_rate;
827 
828 	/*
829 	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
830 	 * & ABORT_RATE_CHANGE notifiers
831 	 */
832 	if (clk->notifier_count && msg)
833 		__clk_notify(clk, msg, old_rate, clk->rate);
834 
835 	hlist_for_each_entry(child, tmp, &clk->children, child_node)
836 		__clk_recalc_rates(child, msg);
837 }
838 
839 /**
840  * clk_get_rate - return the rate of clk
841  * @clk: the clk whose rate is being returned
842  *
843  * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
844  * is set, in which case the rate is first recalculated via .recalc_rate.
845  * If clk is NULL then returns 0.
846  */
847 unsigned long clk_get_rate(struct clk *clk)
848 {
849 	unsigned long rate;
850 
851 	mutex_lock(&prepare_lock);
852 
853 	if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
854 		__clk_recalc_rates(clk, 0);
855 
856 	rate = __clk_get_rate(clk);
857 	mutex_unlock(&prepare_lock);
858 
859 	return rate;
860 }
861 EXPORT_SYMBOL_GPL(clk_get_rate);
862 
863 /**
864  * __clk_speculate_rates
865  * @clk: first clk in the subtree
866  * @parent_rate: the "future" rate of clk's parent
867  *
868  * Walks the subtree of clks starting with clk, speculating rates as it
869  * goes and firing off PRE_RATE_CHANGE notifications as necessary.
870  *
871  * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
872  * pre-rate change notifications and returns early if no clks in the
873  * subtree have subscribed to the notifications.  Note that if a clk does not
874  * implement the .recalc_rate callback then it is assumed that the clock will
875  * take on the rate of its parent.
876  *
877  * Caller must hold prepare_lock.
878  */
879 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
880 {
881 	struct hlist_node *tmp;
882 	struct clk *child;
883 	unsigned long new_rate;
884 	int ret = NOTIFY_DONE;
885 
886 	if (clk->ops->recalc_rate)
887 		new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
888 	else
889 		new_rate = parent_rate;
890 
891 	/* abort the rate change if a driver returns NOTIFY_BAD */
892 	if (clk->notifier_count)
893 		ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
894 
895 	if (ret == NOTIFY_BAD)
896 		goto out;
897 
898 	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
899 		ret = __clk_speculate_rates(child, new_rate);
900 		if (ret == NOTIFY_BAD)
901 			break;
902 	}
903 
904 out:
905 	return ret;
906 }
907 
908 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
909 {
910 	struct clk *child;
911 	struct hlist_node *tmp;
912 
913 	clk->new_rate = new_rate;
914 
915 	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
916 		if (child->ops->recalc_rate)
917 			child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
918 		else
919 			child->new_rate = new_rate;
920 		clk_calc_subtree(child, child->new_rate);
921 	}
922 }
923 
924 /*
925  * calculate the new rates returning the topmost clock that has to be
926  * changed.
927  */
928 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
929 {
930 	struct clk *top = clk;
931 	unsigned long best_parent_rate = 0;
932 	unsigned long new_rate;
933 
934 	/* sanity */
935 	if (IS_ERR_OR_NULL(clk))
936 		return NULL;
937 
938 	/* save parent rate, if it exists */
939 	if (clk->parent)
940 		best_parent_rate = clk->parent->rate;
941 
942 	/* never propagate up to the parent */
943 	if (!(clk->flags & CLK_SET_RATE_PARENT)) {
944 		if (!clk->ops->round_rate) {
945 			clk->new_rate = clk->rate;
946 			return NULL;
947 		}
948 		new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
949 		goto out;
950 	}
951 
952 	/* need clk->parent from here on out */
953 	if (!clk->parent) {
954 		pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
955 		return NULL;
956 	}
957 
958 	if (!clk->ops->round_rate) {
959 		top = clk_calc_new_rates(clk->parent, rate);
960 		new_rate = clk->parent->new_rate;
961 
962 		goto out;
963 	}
964 
965 	new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
966 
967 	if (best_parent_rate != clk->parent->rate) {
968 		top = clk_calc_new_rates(clk->parent, best_parent_rate);
969 
970 		goto out;
971 	}
972 
973 out:
974 	clk_calc_subtree(clk, new_rate);
975 
976 	return top;
977 }
978 
979 /*
980  * Notify about rate changes in a subtree. Always walk down the whole tree
981  * so that in case of an error we can walk down the whole tree again and
982  * abort the change.
983  */
984 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
985 {
986 	struct hlist_node *tmp;
987 	struct clk *child, *fail_clk = NULL;
988 	int ret = NOTIFY_DONE;
989 
990 	if (clk->rate == clk->new_rate)
991 		return NULL;
992 
993 	if (clk->notifier_count) {
994 		ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
995 		if (ret == NOTIFY_BAD)
996 			fail_clk = clk;
997 	}
998 
999 	hlist_for_each_entry(child, tmp, &clk->children, child_node) {
1000 		clk = clk_propagate_rate_change(child, event);
1001 		if (clk)
1002 			fail_clk = clk;
1003 	}
1004 
1005 	return fail_clk;
1006 }
1007 
1008 /*
1009  * walk down a subtree and set the new rates notifying the rate
1010  * change on the way
1011  */
1012 static void clk_change_rate(struct clk *clk)
1013 {
1014 	struct clk *child;
1015 	unsigned long old_rate;
1016 	unsigned long best_parent_rate = 0;
1017 	struct hlist_node *tmp;
1018 
1019 	old_rate = clk->rate;
1020 
1021 	if (clk->parent)
1022 		best_parent_rate = clk->parent->rate;
1023 
1024 	if (clk->ops->set_rate)
1025 		clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
1026 
1027 	if (clk->ops->recalc_rate)
1028 		clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
1029 	else
1030 		clk->rate = best_parent_rate;
1031 
1032 	if (clk->notifier_count && old_rate != clk->rate)
1033 		__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
1034 
1035 	hlist_for_each_entry(child, tmp, &clk->children, child_node)
1036 		clk_change_rate(child);
1037 }
1038 
1039 /**
1040  * clk_set_rate - specify a new rate for clk
1041  * @clk: the clk whose rate is being changed
1042  * @rate: the new rate for clk
1043  *
1044  * In the simplest case clk_set_rate will only adjust the rate of clk.
1045  *
1046  * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1047  * propagate up to clk's parent; whether or not this happens depends on the
1048  * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
1049  * after calling .round_rate then upstream parent propagation is ignored.  If
1050  * *parent_rate comes back with a new rate for clk's parent then we propagate
1051  * up to clk's parent and set its rate.  Upward propagation will continue
1052  * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1053  * .round_rate stops requesting changes to clk's parent_rate.
1054  *
1055  * Rate changes are accomplished via tree traversal that also recalculates the
1056  * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1057  *
1058  * Returns 0 on success, -EERROR otherwise.
1059  */
1060 int clk_set_rate(struct clk *clk, unsigned long rate)
1061 {
1062 	struct clk *top, *fail_clk;
1063 	int ret = 0;
1064 
1065 	/* prevent racing with updates to the clock topology */
1066 	mutex_lock(&prepare_lock);
1067 
1068 	/* bail early if nothing to do */
1069 	if (rate == clk->rate)
1070 		goto out;
1071 
1072 	if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1073 		ret = -EBUSY;
1074 		goto out;
1075 	}
1076 
1077 	/* calculate new rates and get the topmost changed clock */
1078 	top = clk_calc_new_rates(clk, rate);
1079 	if (!top) {
1080 		ret = -EINVAL;
1081 		goto out;
1082 	}
1083 
1084 	/* notify that we are about to change rates */
1085 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1086 	if (fail_clk) {
1087 		pr_warn("%s: failed to set %s rate\n", __func__,
1088 				fail_clk->name);
1089 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1090 		ret = -EBUSY;
1091 		goto out;
1092 	}
1093 
1094 	/* change the rates */
1095 	clk_change_rate(top);
1096 
1097 out:
1098 	mutex_unlock(&prepare_lock);
1099 
1100 	return ret;
1101 }
1102 EXPORT_SYMBOL_GPL(clk_set_rate);
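
/*
 * Illustrative sketch (hypothetical foo_* names): a clock flagged with
 * CLK_SET_RATE_GATE must be unprepared across a rate change, as
 * enforced by the prepare_count check above.
 */
#if 0	/* example only */
static int foo_reclock(struct foo_dev *foo, unsigned long rate)
{
	int ret;

	/* gate first, or clk_set_rate() will return -EBUSY */
	clk_disable_unprepare(foo->clk);

	ret = clk_set_rate(foo->clk, rate);

	/* restore the gate state even if the rate change failed */
	if (clk_prepare_enable(foo->clk))
		pr_warn("foo: failed to re-enable clk\n");

	return ret;
}
#endif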
1103 
1104 /**
1105  * clk_get_parent - return the parent of a clk
1106  * @clk: the clk whose parent gets returned
1107  *
1108  * Simply returns clk->parent.  Returns NULL if clk is NULL.
1109  */
1110 struct clk *clk_get_parent(struct clk *clk)
1111 {
1112 	struct clk *parent;
1113 
1114 	mutex_lock(&prepare_lock);
1115 	parent = __clk_get_parent(clk);
1116 	mutex_unlock(&prepare_lock);
1117 
1118 	return parent;
1119 }
1120 EXPORT_SYMBOL_GPL(clk_get_parent);
1121 
1122 /*
1123  * .get_parent is mandatory for clocks with multiple possible parents.  It is
1124  * optional for single-parent clocks.  Always call .get_parent if it is
1125  * available and WARN if it is missing for multi-parent clocks.
1126  *
1127  * For single-parent clocks without .get_parent, first check to see if the
1128  * .parents array exists, and if so use it to avoid an expensive tree
1129  * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
1130  */
1131 static struct clk *__clk_init_parent(struct clk *clk)
1132 {
1133 	struct clk *ret = NULL;
1134 	u8 index;
1135 
1136 	/* handle the trivial cases */
1137 
1138 	if (!clk->num_parents)
1139 		goto out;
1140 
1141 	if (clk->num_parents == 1) {
1142 		if (IS_ERR_OR_NULL(clk->parent))
1143 			ret = clk->parent = __clk_lookup(clk->parent_names[0]);
1144 			clk->parent = __clk_lookup(clk->parent_names[0]);
1145 		goto out;
1146 	}
1147 
1148 	if (!clk->ops->get_parent) {
1149 		WARN(!clk->ops->get_parent,
1150 			"%s: multi-parent clocks must implement .get_parent\n",
1151 			__func__);
1152 		goto out;
1153 	}
1154 
1155 	/*
1156 	 * Do our best to cache parent clocks in clk->parents.  This prevents
1157 	 * unnecessary and expensive calls to __clk_lookup.  We don't set
1158 	 * clk->parent here; that is done by the calling function
1159 	 */
1160 
1161 	index = clk->ops->get_parent(clk->hw);
1162 
1163 	if (!clk->parents)
1164 		clk->parents =
1165 			kzalloc((sizeof(struct clk*) * clk->num_parents),
1166 					GFP_KERNEL);
1167 
1168 	if (!clk->parents)
1169 		ret = __clk_lookup(clk->parent_names[index]);
1170 	else if (!clk->parents[index])
1171 		ret = clk->parents[index] =
1172 			__clk_lookup(clk->parent_names[index]);
1173 	else
1174 		ret = clk->parents[index];
1175 
1176 out:
1177 	return ret;
1178 }
1179 
1180 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1181 {
1182 #ifdef CONFIG_COMMON_CLK_DEBUG
1183 	struct dentry *d;
1184 	struct dentry *new_parent_d;
1185 #endif
1186 
1187 	if (!clk || !new_parent)
1188 		return;
1189 
1190 	hlist_del(&clk->child_node);
1191 
1192 	if (new_parent)
1193 		hlist_add_head(&clk->child_node, &new_parent->children);
1194 	else
1195 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1196 
1197 #ifdef CONFIG_COMMON_CLK_DEBUG
1198 	if (!inited)
1199 		goto out;
1200 
1201 	if (new_parent)
1202 		new_parent_d = new_parent->dentry;
1203 	else
1204 		new_parent_d = orphandir;
1205 
1206 	d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
1207 			new_parent_d, clk->name);
1208 	if (d)
1209 		clk->dentry = d;
1210 	else
1211 		pr_debug("%s: failed to rename debugfs entry for %s\n",
1212 				__func__, clk->name);
1213 out:
1214 #endif
1215 
1216 	clk->parent = new_parent;
1217 
1218 	__clk_recalc_rates(clk, POST_RATE_CHANGE);
1219 }
1220 
1221 static int __clk_set_parent(struct clk *clk, struct clk *parent)
1222 {
1223 	struct clk *old_parent;
1224 	unsigned long flags;
1225 	int ret = -EINVAL;
1226 	u8 i;
1227 
1228 	old_parent = clk->parent;
1229 
1230 	if (!clk->parents)
1231 		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1232 								GFP_KERNEL);
1233 
1234 	/*
1235 	 * find index of new parent clock using cached parent ptrs,
1236 	 * or if not yet cached, use string name comparison and cache
1237 	 * them now to avoid future calls to __clk_lookup.
1238 	 */
1239 	for (i = 0; i < clk->num_parents; i++) {
1240 		if (clk->parents && clk->parents[i] == parent)
1241 			break;
1242 		else if (!strcmp(clk->parent_names[i], parent->name)) {
1243 			if (clk->parents)
1244 				clk->parents[i] = __clk_lookup(parent->name);
1245 			break;
1246 		}
1247 	}
1248 
1249 	if (i == clk->num_parents) {
1250 		pr_debug("%s: clock %s is not a possible parent of clock %s\n",
1251 				__func__, parent->name, clk->name);
1252 		goto out;
1253 	}
1254 
1255 	/* migrate prepare and enable */
1256 	if (clk->prepare_count)
1257 		__clk_prepare(parent);
1258 
1259 	/* FIXME replace with clk_is_enabled(clk) someday */
1260 	spin_lock_irqsave(&enable_lock, flags);
1261 	if (clk->enable_count)
1262 		__clk_enable(parent);
1263 	spin_unlock_irqrestore(&enable_lock, flags);
1264 
1265 	/* change clock input source */
1266 	ret = clk->ops->set_parent(clk->hw, i);
1267 
1268 	/* clean up old prepare and enable */
1269 	spin_lock_irqsave(&enable_lock, flags);
1270 	if (clk->enable_count)
1271 		__clk_disable(old_parent);
1272 	spin_unlock_irqrestore(&enable_lock, flags);
1273 
1274 	if (clk->prepare_count)
1275 		__clk_unprepare(old_parent);
1276 
1277 out:
1278 	return ret;
1279 }
1280 
1281 /**
1282  * clk_set_parent - switch the parent of a mux clk
1283  * @clk: the mux clk whose input we are switching
1284  * @parent: the new input to clk
1285  *
1286  * Re-parent clk to use parent as its new input source.  If clk has the
1287  * CLK_SET_PARENT_GATE flag set then clk must be gated for this
1288  * operation to succeed.  After successfully changing clk's parent
1289  * clk_set_parent will update the clk topology, sysfs topology and
1290  * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
1291  * success, -EERROR otherwise.
1292  */
1293 int clk_set_parent(struct clk *clk, struct clk *parent)
1294 {
1295 	int ret = 0;
1296 
1297 	if (!clk || !clk->ops)
1298 		return -EINVAL;
1299 
1300 	if (!clk->ops->set_parent)
1301 		return -ENOSYS;
1302 
1303 	/* prevent racing with updates to the clock topology */
1304 	mutex_lock(&prepare_lock);
1305 
1306 	if (clk->parent == parent)
1307 		goto out;
1308 
1309 	/* propagate PRE_RATE_CHANGE notifications */
1310 	if (clk->notifier_count)
1311 		ret = __clk_speculate_rates(clk, parent->rate);
1312 
1313 	/* abort if a driver objects */
1314 	if (ret == NOTIFY_STOP)
1315 		goto out;
1316 
1317 	/* only re-parent if the clock is not in use */
1318 	if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
1319 		ret = -EBUSY;
1320 	else
1321 		ret = __clk_set_parent(clk, parent);
1322 
1323 	/* propagate ABORT_RATE_CHANGE if .set_parent failed */
1324 	if (ret) {
1325 		__clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1326 		goto out;
1327 	}
1328 
1329 	/* propagate rate recalculation downstream */
1330 	__clk_reparent(clk, parent);
1331 
1332 out:
1333 	mutex_unlock(&prepare_lock);
1334 
1335 	return ret;
1336 }
1337 EXPORT_SYMBOL_GPL(clk_set_parent);
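
/*
 * Illustrative sketch (hypothetical "foo" clkdev names): switching a
 * mux to an alternate input.
 */
#if 0	/* example only */
static int foo_use_slow_parent(struct device *dev)
{
	struct clk *mux = devm_clk_get(dev, "foo_mux");
	struct clk *slow = devm_clk_get(dev, "foo_slow");

	if (IS_ERR(mux) || IS_ERR(slow))
		return -ENODEV;

	/* fails with -EBUSY if the mux is prepared and has CLK_SET_PARENT_GATE */
	return clk_set_parent(mux, slow);
}
#endif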
1338 
1339 /**
1340  * __clk_init - initialize the data structures in a struct clk
1341  * @dev:	device initializing this clk, placeholder for now
1342  * @clk:	clk being initialized
1343  *
1344  * Initializes the lists in struct clk, queries the hardware for the
1345  * parent and rate and sets them both.
1346  */
1347 int __clk_init(struct device *dev, struct clk *clk)
1348 {
1349 	int i, ret = 0;
1350 	struct clk *orphan;
1351 	struct hlist_node *tmp, *tmp2;
1352 
1353 	if (!clk)
1354 		return -EINVAL;
1355 
1356 	mutex_lock(&prepare_lock);
1357 
1358 	/* check to see if a clock with this name is already registered */
1359 	if (__clk_lookup(clk->name)) {
1360 		pr_debug("%s: clk %s already initialized\n",
1361 				__func__, clk->name);
1362 		ret = -EEXIST;
1363 		goto out;
1364 	}
1365 
1366 	/* check that clk_ops are sane.  See Documentation/clk.txt */
1367 	if (clk->ops->set_rate &&
1368 			!(clk->ops->round_rate && clk->ops->recalc_rate)) {
1369 		pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
1370 				__func__, clk->name);
1371 		ret = -EINVAL;
1372 		goto out;
1373 	}
1374 
1375 	if (clk->ops->set_parent && !clk->ops->get_parent) {
1376 		pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1377 				__func__, clk->name);
1378 		ret = -EINVAL;
1379 		goto out;
1380 	}
1381 
1382 	/* throw a WARN if any entries in parent_names are NULL */
1383 	for (i = 0; i < clk->num_parents; i++)
1384 		WARN(!clk->parent_names[i],
1385 				"%s: invalid NULL in %s's .parent_names\n",
1386 				__func__, clk->name);
1387 
1388 	/*
1389 	 * Allocate an array of struct clk *'s to avoid unnecessary string
1390 	 * look-ups of clk's possible parents.  This can fail for clocks passed
1391 	 * in to clk_init during early boot; thus any access to clk->parents[]
1392 	 * must always check for a NULL pointer and try to populate it if
1393 	 * necessary.
1394 	 *
1395 	 * If clk->parents is not NULL we skip this entire block.  This allows
1396 	 * for clock drivers to statically initialize clk->parents.
1397 	 */
1398 	if (clk->num_parents > 1 && !clk->parents) {
1399 		clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1400 				GFP_KERNEL);
1401 		/*
1402 		 * __clk_lookup returns NULL for parents that have not been
1403 		 * clk_init'd; thus any access to clk->parents[] must check
1404 		 * for a NULL pointer.  We can always perform lazy lookups for
1405 		 * missing parents later on.
1406 		 */
1407 		if (clk->parents)
1408 			for (i = 0; i < clk->num_parents; i++)
1409 				clk->parents[i] =
1410 					__clk_lookup(clk->parent_names[i]);
1411 	}
1412 
1413 	clk->parent = __clk_init_parent(clk);
1414 
1415 	/*
1416 	 * Populate clk->parent if parent has already been __clk_init'd.  If
1417 	 * parent has not yet been __clk_init'd then place clk in the orphan
1418 	 * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
1419 	 * clk list.
1420 	 *
1421 	 * Every time a new clk is clk_init'd then we walk the list of orphan
1422 	 * clocks and re-parent any that are children of the clock currently
1423 	 * being clk_init'd.
1424 	 */
1425 	if (clk->parent)
1426 		hlist_add_head(&clk->child_node,
1427 				&clk->parent->children);
1428 	else if (clk->flags & CLK_IS_ROOT)
1429 		hlist_add_head(&clk->child_node, &clk_root_list);
1430 	else
1431 		hlist_add_head(&clk->child_node, &clk_orphan_list);
1432 
1433 	/*
1434 	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
1435 	 * simple clocks and lazy developers the default fallback is to use the
1436 	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
1437 	 * then rate is set to zero.
1438 	 */
1439 	if (clk->ops->recalc_rate)
1440 		clk->rate = clk->ops->recalc_rate(clk->hw,
1441 				__clk_get_rate(clk->parent));
1442 	else if (clk->parent)
1443 		clk->rate = clk->parent->rate;
1444 	else
1445 		clk->rate = 0;
1446 
1447 	/*
1448 	 * walk the list of orphan clocks and reparent any that are children of
1449 	 * this clock
1450 	 */
1451 	hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
1452 		if (orphan->ops->get_parent) {
1453 			i = orphan->ops->get_parent(orphan->hw);
1454 			if (!strcmp(clk->name, orphan->parent_names[i]))
1455 				__clk_reparent(orphan, clk);
1456 			continue;
1457 		}
1458 
1459 		for (i = 0; i < orphan->num_parents; i++)
1460 			if (!strcmp(clk->name, orphan->parent_names[i])) {
1461 				__clk_reparent(orphan, clk);
1462 				break;
1463 			}
1464 	}
1465 
1466 	/*
1467 	 * optional platform-specific magic
1468 	 *
1469 	 * The .init callback is not used by any of the basic clock types, but
1470 	 * exists for weird hardware that must perform initialization magic.
1471 	 * Please consider other ways of solving initialization problems before
1472  * using this callback, as its use is discouraged.
1473 	 */
1474 	if (clk->ops->init)
1475 		clk->ops->init(clk->hw);
1476 
1477 	clk_debug_register(clk);
1478 
1479 out:
1480 	mutex_unlock(&prepare_lock);
1481 
1482 	return ret;
1483 }
1484 
1485 /**
1486  * __clk_register - register a clock and return a cookie.
1487  *
1488  * Same as clk_register, except that the .clk field inside hw shall point to a
1489  * preallocated (generally statically allocated) struct clk. None of the fields
1490  * of the struct clk need to be initialized.
1491  *
1492  * The data pointed to by the .init and .clk fields shall NOT be marked as
1493  * init data.
1494  *
1495  * __clk_register is only exposed via clk-private.h and is intended for use with
1496  * very large numbers of clocks that need to be statically initialized.  It is
1497  * a layering violation to include clk-private.h from any code which implements
1498  * a clock's .ops; as such any statically initialized clock data MUST be in a
1499  * separate C file from the logic that implements its operations.  Returns 0
1500  * on success, otherwise an error code.
1501  */
1502 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1503 {
1504 	int ret;
1505 	struct clk *clk;
1506 
1507 	clk = hw->clk;
1508 	clk->name = hw->init->name;
1509 	clk->ops = hw->init->ops;
1510 	clk->hw = hw;
1511 	clk->flags = hw->init->flags;
1512 	clk->parent_names = hw->init->parent_names;
1513 	clk->num_parents = hw->init->num_parents;
1514 
1515 	ret = __clk_init(dev, clk);
1516 	if (ret)
1517 		return ERR_PTR(ret);
1518 
1519 	return clk;
1520 }
1521 EXPORT_SYMBOL_GPL(__clk_register);
1522 
1523 static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1524 {
1525 	int i, ret;
1526 
1527 	clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1528 	if (!clk->name) {
1529 		pr_err("%s: could not allocate clk->name\n", __func__);
1530 		ret = -ENOMEM;
1531 		goto fail_name;
1532 	}
1533 	clk->ops = hw->init->ops;
1534 	clk->hw = hw;
1535 	clk->flags = hw->init->flags;
1536 	clk->num_parents = hw->init->num_parents;
1537 	hw->clk = clk;
1538 
1539 	/* allocate local copy in case parent_names is __initdata */
1540 	clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
1541 			GFP_KERNEL);
1542 
1543 	if (!clk->parent_names) {
1544 		pr_err("%s: could not allocate clk->parent_names\n", __func__);
1545 		ret = -ENOMEM;
1546 		goto fail_parent_names;
1547 	}
1548 
1549 
1550 	/* copy each string name in case parent_names is __initdata */
1551 	for (i = 0; i < clk->num_parents; i++) {
1552 		clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1553 						GFP_KERNEL);
1554 		if (!clk->parent_names[i]) {
1555 			pr_err("%s: could not copy parent_names\n", __func__);
1556 			ret = -ENOMEM;
1557 			goto fail_parent_names_copy;
1558 		}
1559 	}
1560 
1561 	ret = __clk_init(dev, clk);
1562 	if (!ret)
1563 		return 0;
1564 
1565 fail_parent_names_copy:
1566 	while (--i >= 0)
1567 		kfree(clk->parent_names[i]);
1568 	kfree(clk->parent_names);
1569 fail_parent_names:
1570 	kfree(clk->name);
1571 fail_name:
1572 	return ret;
1573 }
1574 
1575 /**
1576  * clk_register - allocate a new clock, register it and return an opaque cookie
1577  * @dev: device that is registering this clock
1578  * @hw: link to hardware-specific clock data
1579  *
1580  * clk_register is the primary interface for populating the clock tree with new
1581  * clock nodes.  It returns a pointer to the newly allocated struct clk which
1582  * cannot be dereferenced by driver code but may be used in conjunction with the
1583  * rest of the clock API.  In the event of an error clk_register will return an
1584  * error code; drivers must test for an error code after calling clk_register.
1585  */
1586 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1587 {
1588 	int ret;
1589 	struct clk *clk;
1590 
1591 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1592 	if (!clk) {
1593 		pr_err("%s: could not allocate clk\n", __func__);
1594 		ret = -ENOMEM;
1595 		goto fail_out;
1596 	}
1597 
1598 	ret = _clk_register(dev, hw, clk);
1599 	if (!ret)
1600 		return clk;
1601 
1602 	kfree(clk);
1603 fail_out:
1604 	return ERR_PTR(ret);
1605 }
1606 EXPORT_SYMBOL_GPL(clk_register);
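
/*
 * Illustrative sketch of a minimal clock provider (all foo_* names are
 * hypothetical): a driver-private gate wrapped in clk_ops and handed to
 * clk_register() through clk_hw.init.
 */
#if 0	/* example only */
struct foo_gate {
	struct clk_hw hw;
	void __iomem *reg;
};

#define to_foo_gate(_hw) container_of(_hw, struct foo_gate, hw)

static int foo_gate_enable(struct clk_hw *hw)
{
	writel(1, to_foo_gate(hw)->reg);
	return 0;
}

static void foo_gate_disable(struct clk_hw *hw)
{
	writel(0, to_foo_gate(hw)->reg);
}

static const struct clk_ops foo_gate_ops = {
	.enable		= foo_gate_enable,
	.disable	= foo_gate_disable,
};

static struct clk *foo_register_gate(struct device *dev, void __iomem *reg)
{
	static const char *parents[] = { "foo_parent" };
	struct clk_init_data init = {
		.name		= "foo_gate",
		.ops		= &foo_gate_ops,
		.parent_names	= parents,
		.num_parents	= ARRAY_SIZE(parents),
	};
	struct foo_gate *gate;

	gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->reg = reg;
	gate->hw.init = &init;

	/* callers must test the result with IS_ERR() */
	return clk_register(dev, &gate->hw);
}
#endif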
1607 
1608 /**
1609  * clk_unregister - unregister a currently registered clock
1610  * @clk: clock to unregister
1611  *
1612  * Currently unimplemented.
1613  */
1614 void clk_unregister(struct clk *clk) {}
1615 EXPORT_SYMBOL_GPL(clk_unregister);
1616 
1617 static void devm_clk_release(struct device *dev, void *res)
1618 {
1619 	clk_unregister(res);
1620 }
1621 
1622 /**
1623  * devm_clk_register - resource managed clk_register()
1624  * @dev: device that is registering this clock
1625  * @hw: link to hardware-specific clock data
1626  *
1627  * Managed clk_register(). Clocks returned from this function are
1628  * automatically clk_unregister()ed on driver detach. See clk_register() for
1629  * more information.
1630  */
1631 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1632 {
1633 	struct clk *clk;
1634 	int ret;
1635 
1636 	clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1637 	if (!clk)
1638 		return ERR_PTR(-ENOMEM);
1639 
1640 	ret = _clk_register(dev, hw, clk);
1641 	if (!ret) {
1642 		devres_add(dev, clk);
1643 	} else {
1644 		devres_free(clk);
1645 		clk = ERR_PTR(ret);
1646 	}
1647 
1648 	return clk;
1649 }
1650 EXPORT_SYMBOL_GPL(devm_clk_register);
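
/*
 * Illustrative sketch (hypothetical foo_* names; foo_gate_init stands
 * in for a clk_init_data like the one in the clk_register() example
 * above): registration from probe, with teardown handled by devres.
 */
#if 0	/* example only */
static int foo_probe(struct platform_device *pdev)
{
	struct foo_gate *gate;
	struct clk *clk;

	gate = devm_kzalloc(&pdev->dev, sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return -ENOMEM;

	gate->hw.init = &foo_gate_init;

	clk = devm_clk_register(&pdev->dev, &gate->hw);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* no clk_unregister() needed in the driver's remove path */
	return 0;
}
#endif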
1651 
1652 static int devm_clk_match(struct device *dev, void *res, void *data)
1653 {
1654 	struct clk *c = res;
1655 	if (WARN_ON(!c))
1656 		return 0;
1657 	return c == data;
1658 }
1659 
1660 /**
1661  * devm_clk_unregister - resource managed clk_unregister()
1662  * @clk: clock to unregister
1663  *
1664  * Deallocate a clock allocated with devm_clk_register(). Normally
1665  * this function will not need to be called and the resource management
1666  * code will ensure that the resource is freed.
1667  */
1668 void devm_clk_unregister(struct device *dev, struct clk *clk)
1669 {
1670 	WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1671 }
1672 EXPORT_SYMBOL_GPL(devm_clk_unregister);
1673 
1674 /***        clk rate change notifiers        ***/
1675 
1676 /**
1677  * clk_notifier_register - add a clk rate change notifier
1678  * @clk: struct clk * to watch
1679  * @nb: struct notifier_block * with callback info
1680  *
1681  * Request notification when clk's rate changes.  This uses an SRCU
1682  * notifier because we want it to block and notifier unregistrations are
1683  * uncommon.  The callbacks associated with the notifier must not
1684  * re-enter into the clk framework by calling any top-level clk APIs;
1685  * this would recursively acquire the prepare_lock mutex and deadlock.
1686  *
1687  * Pre-change notifier callbacks will be passed the current, pre-change
1688  * rate of the clk via struct clk_notifier_data.old_rate.  The new,
1689  * post-change rate of the clk is passed via struct
1690  * clk_notifier_data.new_rate.
1691  *
1692  * Post-change notifiers will pass the now-current, post-change rate of
1693  * the clk in both struct clk_notifier_data.old_rate and struct
1694  * clk_notifier_data.new_rate.
1695  *
1696  * Abort-change notifiers are effectively the opposite of pre-change
1697  * notifiers: the original pre-change clk rate is passed in via struct
1698  * clk_notifier_data.new_rate and the failed post-change rate is passed
1699  * in via struct clk_notifier_data.old_rate.
1700  *
1701  * clk_notifier_register() must be called from non-atomic context.
1702  * Returns -EINVAL if called with null arguments, -ENOMEM upon
1703  * allocation failure; otherwise, passes along the return value of
1704  * srcu_notifier_chain_register().
1705  */
1706 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1707 {
1708 	struct clk_notifier *cn;
1709 	int ret = -ENOMEM;
1710 
1711 	if (!clk || !nb)
1712 		return -EINVAL;
1713 
1714 	mutex_lock(&prepare_lock);
1715 
1716 	/* search the list of notifiers for this clk */
1717 	list_for_each_entry(cn, &clk_notifier_list, node)
1718 		if (cn->clk == clk)
1719 			break;
1720 
1721 	/* if clk wasn't in the notifier list, allocate new clk_notifier */
1722 	if (cn->clk != clk) {
1723 		cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1724 		if (!cn)
1725 			goto out;
1726 
1727 		cn->clk = clk;
1728 		srcu_init_notifier_head(&cn->notifier_head);
1729 
1730 		list_add(&cn->node, &clk_notifier_list);
1731 	}
1732 
1733 	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1734 
1735 	clk->notifier_count++;
1736 
1737 out:
1738 	mutex_unlock(&prepare_lock);
1739 
1740 	return ret;
1741 }
1742 EXPORT_SYMBOL_GPL(clk_notifier_register);
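
/*
 * Illustrative sketch (FOO_MAX_RATE and foo_adjust_dividers() are
 * hypothetical): a notifier callback vetoing out-of-range rates and
 * reacting to completed changes.
 */
#if 0	/* example only */
static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* returning NOTIFY_BAD here aborts the rate change */
		if (ndata->new_rate > FOO_MAX_RATE)
			return NOTIFY_BAD;
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
		foo_adjust_dividers(ndata->new_rate);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block foo_clk_nb = {
	.notifier_call = foo_clk_notify,
};

/* registered from non-atomic context: clk_notifier_register(clk, &foo_clk_nb) */
#endif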
1743 
1744 /**
1745  * clk_notifier_unregister - remove a clk rate change notifier
1746  * @clk: struct clk *
1747  * @nb: struct notifier_block * with callback info
1748  *
1749  * Request no further notification for changes to 'clk' and free the memory
1750  * allocated in clk_notifier_register.
1751  *
1752  * Returns -EINVAL if called with null arguments; otherwise, passes
1753  * along the return value of srcu_notifier_chain_unregister().
1754  */
1755 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1756 {
1757 	struct clk_notifier *cn = NULL;
1758 	int ret = -EINVAL;
1759 
1760 	if (!clk || !nb)
1761 		return -EINVAL;
1762 
1763 	mutex_lock(&prepare_lock);
1764 
1765 	list_for_each_entry(cn, &clk_notifier_list, node)
1766 		if (cn->clk == clk)
1767 			break;
1768 
1769 	if (cn->clk == clk) {
1770 		ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1771 
1772 		clk->notifier_count--;
1773 
1774 		/* XXX the notifier code should handle this better */
1775 		if (!cn->notifier_head.head) {
1776 			srcu_cleanup_notifier_head(&cn->notifier_head);
1777 			kfree(cn);
1778 		}
1779 
1780 	} else {
1781 		ret = -ENOENT;
1782 	}
1783 
1784 	mutex_unlock(&prepare_lock);
1785 
1786 	return ret;
1787 }
1788 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1789 
1790 #ifdef CONFIG_OF
1791 /**
1792  * struct of_clk_provider - Clock provider registration structure
1793  * @link: Entry in global list of clock providers
1794  * @node: Pointer to device tree node of clock provider
1795  * @get: Get clock callback.  Returns NULL or a struct clk for the
1796  *       given clock specifier
1797  * @data: context pointer to be passed into @get callback
1798  */
1799 struct of_clk_provider {
1800 	struct list_head link;
1801 
1802 	struct device_node *node;
1803 	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1804 	void *data;
1805 };
1806 
1807 extern struct of_device_id __clk_of_table[];
1808 
1809 static const struct of_device_id __clk_of_table_sentinel
1810 	__used __section(__clk_of_table_end);
1811 
1812 static LIST_HEAD(of_clk_providers);
1813 static DEFINE_MUTEX(of_clk_lock);
1814 
1815 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1816 				     void *data)
1817 {
1818 	return data;
1819 }
1820 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1821 
1822 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
1823 {
1824 	struct clk_onecell_data *clk_data = data;
1825 	unsigned int idx = clkspec->args[0];
1826 
1827 	if (idx >= clk_data->clk_num) {
1828 		pr_err("%s: invalid clock index %d\n", __func__, idx);
1829 		return ERR_PTR(-EINVAL);
1830 	}
1831 
1832 	return clk_data->clks[idx];
1833 }
1834 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
1835 
1836 /**
1837  * of_clk_add_provider() - Register a clock provider for a node
1838  * @np: Device node pointer associated with clock provider
1839  * @clk_src_get: callback for decoding clock
1840  * @data: context pointer for @clk_src_get callback.
1841  */
1842 int of_clk_add_provider(struct device_node *np,
1843 			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
1844 						   void *data),
1845 			void *data)
1846 {
1847 	struct of_clk_provider *cp;
1848 
1849 	cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
1850 	if (!cp)
1851 		return -ENOMEM;
1852 
1853 	cp->node = of_node_get(np);
1854 	cp->data = data;
1855 	cp->get = clk_src_get;
1856 
1857 	mutex_lock(&of_clk_lock);
1858 	list_add(&cp->link, &of_clk_providers);
1859 	mutex_unlock(&of_clk_lock);
1860 	pr_debug("Added clock from %s\n", np->full_name);
1861 
1862 	return 0;
1863 }
1864 EXPORT_SYMBOL_GPL(of_clk_add_provider);
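
/*
 * Illustrative sketch (hypothetical foo_* names): a provider exporting
 * several clocks through of_clk_src_onecell_get(), so that consumers
 * can reference them with a one-cell specifier such as
 * clocks = <&foo_clks 2>.
 */
#if 0	/* example only */
#define FOO_NR_CLKS	4

static struct clk *foo_clks[FOO_NR_CLKS];
static struct clk_onecell_data foo_clk_data = {
	.clks		= foo_clks,
	.clk_num	= FOO_NR_CLKS,
};

static void __init foo_clocks_init(struct device_node *np)
{
	/* populate foo_clks[] with clk_register() first, then: */
	of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
}
#endif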
1865 
1866 /**
1867  * of_clk_del_provider() - Remove a previously registered clock provider
1868  * @np: Device node pointer associated with clock provider
1869  */
1870 void of_clk_del_provider(struct device_node *np)
1871 {
1872 	struct of_clk_provider *cp;
1873 
1874 	mutex_lock(&of_clk_lock);
1875 	list_for_each_entry(cp, &of_clk_providers, link) {
1876 		if (cp->node == np) {
1877 			list_del(&cp->link);
1878 			of_node_put(cp->node);
1879 			kfree(cp);
1880 			break;
1881 		}
1882 	}
1883 	mutex_unlock(&of_clk_lock);
1884 }
1885 EXPORT_SYMBOL_GPL(of_clk_del_provider);
1886 
1887 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
1888 {
1889 	struct of_clk_provider *provider;
1890 	struct clk *clk = ERR_PTR(-ENOENT);
1891 
1892 	/* Check if we have such a provider in our array */
1893 	mutex_lock(&of_clk_lock);
1894 	list_for_each_entry(provider, &of_clk_providers, link) {
1895 		if (provider->node == clkspec->np)
1896 			clk = provider->get(clkspec, provider->data);
1897 		if (!IS_ERR(clk))
1898 			break;
1899 	}
1900 	mutex_unlock(&of_clk_lock);
1901 
1902 	return clk;
1903 }
1904 
1905 const char *of_clk_get_parent_name(struct device_node *np, int index)
1906 {
1907 	struct of_phandle_args clkspec;
1908 	const char *clk_name;
1909 	int rc;
1910 
1911 	if (index < 0)
1912 		return NULL;
1913 
1914 	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
1915 					&clkspec);
1916 	if (rc)
1917 		return NULL;
1918 
1919 	if (of_property_read_string_index(clkspec.np, "clock-output-names",
1920 					  clkspec.args_count ? clkspec.args[0] : 0,
1921 					  &clk_name) < 0)
1922 		clk_name = clkspec.np->name;
1923 
1924 	of_node_put(clkspec.np);
1925 	return clk_name;
1926 }
1927 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
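
/*
 * Illustrative sketch (hypothetical foo_* names): resolving parent
 * names from a node's "clocks" phandles when registering a mux
 * described in the device tree.
 */
#if 0	/* example only */
static void __init foo_mux_setup(struct device_node *np)
{
	const char *parents[2];
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(parents); i++)
		parents[i] = of_clk_get_parent_name(np, i);

	/* parents[] can now seed clk_init_data.parent_names */
}
#endif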
1928 
1929 /**
1930  * of_clk_init() - Scan and init clock providers from the DT
1931  * @matches: array of compatible values and init functions for providers.
1932  *
1933  * This function scans the device tree for matching clock providers and
1934  * calls their initialization functions
1935  */
1936 void __init of_clk_init(const struct of_device_id *matches)
1937 {
1938 	struct device_node *np;
1939 
1940 	if (!matches)
1941 		matches = __clk_of_table;
1942 
1943 	for_each_matching_node(np, matches) {
1944 		const struct of_device_id *match = of_match_node(matches, np);
1945 		of_clk_init_cb_t clk_init_cb = match->data;
1946 		clk_init_cb(np);
1947 	}
1948 }
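
/*
 * Illustrative sketch: assuming the CLK_OF_DECLARE() helper from
 * <linux/clk-provider.h>, which places an entry in __clk_of_table, a
 * provider's init callback runs when the platform calls
 * of_clk_init(NULL).  The foo names are hypothetical.
 */
#if 0	/* example only */
static void __init foo_osc_setup(struct device_node *np)
{
	/* register the fixed-rate root clock described by this node */
}
CLK_OF_DECLARE(foo_osc, "foo,osc", foo_osc_setup);
#endif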
1949 #endif
1950