xref: /openbmc/linux/arch/arm/mach-omap2/clock.c (revision 8577413c)
1 /*
2  *  linux/arch/arm/mach-omap2/clock.c
3  *
4  *  Copyright (C) 2005-2008 Texas Instruments, Inc.
5  *  Copyright (C) 2004-2010 Nokia Corporation
6  *
7  *  Contacts:
8  *  Richard Woodruff <r-woodruff2@ti.com>
9  *  Paul Walmsley
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  */
15 #undef DEBUG
16 
17 #include <linux/kernel.h>
18 #include <linux/export.h>
19 #include <linux/list.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #ifdef CONFIG_COMMON_CLK
24 #include <linux/clk-provider.h>
25 #else
26 #include <linux/clk.h>
27 #endif
28 #include <linux/io.h>
29 #include <linux/bitops.h>
30 
31 #include <asm/cpu.h>
32 
33 
34 #include <trace/events/power.h>
35 
36 #include "soc.h"
37 #include "clockdomain.h"
38 #include "clock.h"
39 #include "cm.h"
40 #include "cm2xxx.h"
41 #include "cm3xxx.h"
42 #include "cm-regbits-24xx.h"
43 #include "cm-regbits-34xx.h"
44 #include "common.h"
45 
46 /*
47  * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
48  * for a module to indicate that it is no longer in idle
49  */
50 #define MAX_MODULE_ENABLE_WAIT		100000
51 
52 u16 cpu_mask;
53 
54 /*
55  * clkdm_control: if true, then when a clock is enabled in the
56  * hardware, its clockdomain will first be enabled; and when a clock
57  * is disabled in the hardware, its clockdomain will be disabled
58  * afterwards.
59  */
60 static bool clkdm_control = true;
61 
62 static LIST_HEAD(clocks);
63 static DEFINE_MUTEX(clocks_mutex);
64 #ifndef CONFIG_COMMON_CLK
65 static DEFINE_SPINLOCK(clockfw_lock);
66 #endif
67 
68 #ifdef CONFIG_COMMON_CLK
69 static LIST_HEAD(clk_hw_omap_clocks);
70 
71 /*
72  * Used for clocks that have the same value as the parent clock,
73  * divided by some factor
74  */
75 unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
76 		unsigned long parent_rate)
77 {
78 	struct clk_hw_omap *oclk;
79 
80 	if (!hw) {
81 		pr_warn("%s: hw is NULL\n", __func__);
82 		return -EINVAL;
83 	}
84 
85 	oclk = to_clk_hw_omap(hw);
86 
87 	WARN_ON(!oclk->fixed_div);
88 
89 	return parent_rate / oclk->fixed_div;
90 }
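
/*
 * Illustrative sketch (hypothetical "foo" names, not real clock data): a
 * fixed-divisor clock would typically set .fixed_div in its struct
 * clk_hw_omap and use this function as its .recalc_rate callback:
 *
 *	static struct clk_hw_omap foo_div2_ck_hw = {
 *		.hw		= { .init = &foo_div2_ck_init_data },
 *		.fixed_div	= 2,
 *	};
 *
 *	static const struct clk_ops foo_div2_ck_ops = {
 *		.recalc_rate	= &omap_fixed_divisor_recalc,
 *	};
 *
 * With a parent rate of 96000000 Hz and .fixed_div = 2, the recalculated
 * rate is 48000000 Hz.
 */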
91 #endif
92 
93 /*
94  * OMAP2+ specific clock functions
95  */
96 
97 /* Private functions */
98 
99 
100 /**
101  * _wait_idlest_generic - wait for a module to leave the idle state
102  * @reg: virtual address of module IDLEST register
103  * @mask: value to mask against to determine if the module is active
104  * @idlest: idle state indicator (0 or 1) for the clock
105  * @name: name of the clock (for printk)
106  *
107  * Wait for a module to leave idle, where its idle-status register is
108  * not inside the CM module.  Returns 1 if the module left idle
109  * promptly, or 0 if the module did not leave idle before the timeout
110  * elapsed.  XXX Deprecated - should be moved into drivers for the
111  * individual IP block that the IDLEST register exists in.
112  */
113 static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
114 				const char *name)
115 {
116 	int i = 0, ena = 0;
117 
118 	ena = (idlest) ? 0 : mask;
119 
120 	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
121 			  MAX_MODULE_ENABLE_WAIT, i);
122 
123 	if (i < MAX_MODULE_ENABLE_WAIT)
124 		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
125 			 name, i);
126 	else
127 		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
128 		       name, MAX_MODULE_ENABLE_WAIT);
129 
130 	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
131 }
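
/*
 * How the polarity works (derived from the code above): if @idlest is 1,
 * the module is considered ready once (__raw_readl(reg) & mask) reads back
 * as 0; if @idlest is 0, it is ready once the masked bit reads back as 1
 * (i.e., equals @mask).  For example, with mask = (1 << 2) and idlest = 0,
 * the loop spins until bit 2 of the IDLEST register is set, or until
 * MAX_MODULE_ENABLE_WAIT iterations of the timeout loop have elapsed.
 */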
132 
133 /**
134  * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
135  * @clk: struct clk * belonging to the module
136  *
137  * If the necessary clocks for the OMAP hardware IP block that
138  * corresponds to clock @clk are enabled, then wait for the module to
139  * indicate readiness (i.e., to leave IDLE).  This code does not
140  * belong in the clock code and will be moved in the medium term to
141  * module-dependent code.  No return value.
142  */
143 #ifdef CONFIG_COMMON_CLK
144 static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
145 #else
146 static void _omap2_module_wait_ready(struct clk *clk)
147 #endif
148 {
149 	void __iomem *companion_reg, *idlest_reg;
150 	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
151 	s16 prcm_mod;
152 	int r;
153 
154 	/* Not all modules have multiple clocks that their IDLEST depends on */
155 	if (clk->ops->find_companion) {
156 		clk->ops->find_companion(clk, &companion_reg, &other_bit);
157 		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
158 			return;
159 	}
160 
161 	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
162 	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
163 	if (r) {
164 		/* IDLEST register not in the CM module */
165 		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
166 #ifdef CONFIG_COMMON_CLK
167 				     __clk_get_name(clk->hw.clk));
168 #else
169 				     clk->name);
170 #endif
171 	} else {
172 		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
173 	}
174 }
175 
176 /* Public functions */
177 
178 /**
179  * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
180  * @clk: OMAP clock struct ptr to use
181  *
182  * Convert a clockdomain name stored in a struct clk 'clk' into a
183  * clockdomain pointer, and save it into the struct clk.  Intended to be
184  * called during clk_register().  No return value.
185  */
186 #ifdef CONFIG_COMMON_CLK
187 void omap2_init_clk_clkdm(struct clk_hw *hw)
188 {
189 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
190 #else
191 void omap2_init_clk_clkdm(struct clk *clk)
192 {
193 #endif
194 	struct clockdomain *clkdm;
195 	const char *clk_name;
196 
197 	if (!clk->clkdm_name)
198 		return;
199 
200 #ifdef CONFIG_COMMON_CLK
201 	clk_name = __clk_get_name(hw->clk);
202 #else
203 	clk_name = __clk_get_name(clk);
204 #endif
205 
206 	clkdm = clkdm_lookup(clk->clkdm_name);
207 	if (clkdm) {
208 		pr_debug("clock: associated clk %s to clkdm %s\n",
209 			 clk_name, clk->clkdm_name);
210 		clk->clkdm = clkdm;
211 	} else {
212 		pr_debug("clock: could not associate clk %s to clkdm %s\n",
213 			 clk_name, clk->clkdm_name);
214 	}
215 }
216 
217 /**
218  * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
219  *
220  * Prevent the OMAP clock code from calling into the clockdomain code
221  * when a hardware clock in that clockdomain is enabled or disabled.
222  * Intended to be called at init time from omap*_clk_init().  No
223  * return value.
224  */
225 void __init omap2_clk_disable_clkdm_control(void)
226 {
227 	clkdm_control = false;
228 }
229 
230 /**
231  * omap2_clk_dflt_find_companion - find companion clock to @clk
232  * @clk: struct clk * to find the companion clock of
233  * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
234  * @other_bit: u8 * to return the companion clock bit shift in
235  *
236  * Note: We don't need special code here for INVERT_ENABLE for the
237  * time being since INVERT_ENABLE only applies to clocks enabled by
238  * CM_CLKEN_PLL.
239  *
240  * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
241  * just a matter of XORing the bits.
242  *
243  * Some clocks don't have companion clocks.  For example, modules with
244  * only an interface clock (such as MAILBOXES) don't have a companion
245  * clock.  Right now, this code relies on the hardware exporting a bit
246  * in the correct companion register that indicates that the
247  * nonexistent 'companion clock' is active.  Future patches will
248  * associate this type of code with per-module data structures to
249  * avoid this issue, and remove the casts.  No return value.
250  */
251 #ifdef CONFIG_COMMON_CLK
252 void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
253 #else
254 void omap2_clk_dflt_find_companion(struct clk *clk,
255 #endif
256 			void __iomem **other_reg, u8 *other_bit)
257 {
258 	u32 r;
259 
260 	/*
261 	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
262 	 * it's just a matter of XORing the bits.
263 	 */
264 	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
265 
266 	*other_reg = (__force void __iomem *)r;
267 	*other_bit = clk->enable_bit;
268 }
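
/*
 * Worked example of the XOR trick above (addresses are illustrative):
 * assuming CM_FCLKEN and CM_ICLKEN differ only in a single offset bit, as
 * the comment above states,
 *
 *	enable_reg = <CM module base> + CM_FCLKEN	(a CM_FCLKENx address)
 *	other_reg  = enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN)
 *		   = <CM module base> + CM_ICLKEN	(the matching CM_ICLKENx)
 *
 * The companion bit shift is assumed to equal the enable bit shift.
 */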
269 
270 /**
271  * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
272  * @clk: struct clk * to find IDLEST info for
273  * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
274  * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
275  * @idlest_val: u8 * to return the idle status indicator
276  *
277  * Return the CM_IDLEST register address and bit shift corresponding
278  * to the module that "owns" this clock.  This default code assumes
279  * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
280  * the IDLEST register address ID corresponds to the CM_*CLKEN
281  * register address ID (e.g., that CM_FCLKEN2 corresponds to
282  * CM_IDLEST2).  This is not true for all modules.  No return value.
283  */
284 #ifdef CONFIG_COMMON_CLK
285 void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
286 #else
287 void omap2_clk_dflt_find_idlest(struct clk *clk,
288 #endif
289 		void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
290 {
291 	u32 r;
292 
293 	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
294 	*idlest_reg = (__force void __iomem *)r;
295 	*idlest_bit = clk->enable_bit;
296 
297 	/*
298 	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
299 	 * 34xx reverses this, just to keep us on our toes.
300 	 * AM35xx uses both, depending on the module.
301 	 */
302 	if (cpu_is_omap24xx())
303 		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
304 	else if (cpu_is_omap34xx())
305 		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
306 	else
307 		BUG();
308 
309 }
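
/*
 * Worked example of the address arithmetic above (offsets are illustrative
 * and assume the IDLEST register sits at offset 0x20 within the same CM
 * submodule, with bits 7:4 of the CM_*CLKEN address encoding only the
 * register offset):
 *
 *	enable_reg = <CM module base> + 0x10	(a CM_ICLKENx-style offset)
 *	idlest_reg = (enable_reg & ~0xf0) | 0x20
 *		   = <CM module base> + 0x20	(the matching CM_IDLESTx)
 */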
310 
311 #ifdef CONFIG_COMMON_CLK
312 /**
313  * omap2_dflt_clk_enable - enable a clock in the hardware
314  * @hw: struct clk_hw * of the clock to enable
315  *
316  * Enable the clock @hw in the hardware.  We first call into the OMAP
317  * clockdomain code to "enable" the corresponding clockdomain if this
318  * is the first enabled user of the clockdomain.  Then program the
319  * hardware to enable the clock.  Then wait for the IP block that uses
320  * this clock to leave idle (if applicable).  Returns the error value
321  * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
322  * if @hw has a null clock enable_reg, or zero upon success.
323  */
324 int omap2_dflt_clk_enable(struct clk_hw *hw)
325 {
326 	struct clk_hw_omap *clk;
327 	u32 v;
328 	int ret = 0;
329 
330 	clk = to_clk_hw_omap(hw);
331 
332 	if (clkdm_control && clk->clkdm) {
333 		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
334 		if (ret) {
335 			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
336 			     __func__, __clk_get_name(hw->clk),
337 			     clk->clkdm->name, ret);
338 			return ret;
339 		}
340 	}
341 
342 	if (unlikely(clk->enable_reg == NULL)) {
343 		pr_err("%s: %s missing enable_reg\n", __func__,
344 		       __clk_get_name(hw->clk));
345 		ret = -EINVAL;
346 		goto err;
347 	}
348 
349 	/* FIXME should not have INVERT_ENABLE bit here */
350 	v = __raw_readl(clk->enable_reg);
351 	if (clk->flags & INVERT_ENABLE)
352 		v &= ~(1 << clk->enable_bit);
353 	else
354 		v |= (1 << clk->enable_bit);
355 	__raw_writel(v, clk->enable_reg);
356 	v = __raw_readl(clk->enable_reg); /* OCP barrier */
357 
358 	if (clk->ops && clk->ops->find_idlest)
359 		_omap2_module_wait_ready(clk);
360 
361 	return 0;
362 
363 err:
364 	if (clkdm_control && clk->clkdm)
365 		clkdm_clk_disable(clk->clkdm, hw->clk);
366 	return ret;
367 }
368 
369 /**
370  * omap2_dflt_clk_disable - disable a clock in the hardware
371  * @hw: struct clk_hw * of the clock to disable
372  *
373  * Disable the clock @hw in the hardware, and call into the OMAP
374  * clockdomain code to "disable" the corresponding clockdomain if all
375  * clocks/hwmods in that clockdomain are now disabled.  No return
376  * value.
377  */
378 void omap2_dflt_clk_disable(struct clk_hw *hw)
379 {
380 	struct clk_hw_omap *clk;
381 	u32 v;
382 
383 	clk = to_clk_hw_omap(hw);
384 	if (!clk->enable_reg) {
385 		/*
386 		 * 'independent' here refers to a clock which is not
387 		 * controlled by its parent.
388 		 */
389 		pr_err("%s: independent clock %s has no enable_reg\n",
390 		       __func__, __clk_get_name(hw->clk));
391 		return;
392 	}
393 
394 	v = __raw_readl(clk->enable_reg);
395 	if (clk->flags & INVERT_ENABLE)
396 		v |= (1 << clk->enable_bit);
397 	else
398 		v &= ~(1 << clk->enable_bit);
399 	__raw_writel(v, clk->enable_reg);
400 	/* No OCP barrier needed here since it is a disable operation */
401 
402 	if (clkdm_control && clk->clkdm)
403 		clkdm_clk_disable(clk->clkdm, hw->clk);
404 }
405 
406 /**
407  * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
408  * @hw: struct clk_hw * of the clock being enabled
409  *
410  * Increment the usecount of the clockdomain of the clock pointed to
411  * by @hw; if the usecount is 1, the clockdomain will be "enabled."
412  * Only needed for clocks that don't use omap2_dflt_clk_enable() as
413  * their enable function pointer.  Passes along the return value of
414  * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
415  * clockdomain, or 0 if clock framework-based clockdomain control is
416  * not implemented.
417  */
418 int omap2_clkops_enable_clkdm(struct clk_hw *hw)
419 {
420 	struct clk_hw_omap *clk;
421 	int ret = 0;
422 
423 	clk = to_clk_hw_omap(hw);
424 
425 	if (unlikely(!clk->clkdm)) {
426 		pr_err("%s: %s: no clkdm set ?!\n", __func__,
427 		       __clk_get_name(hw->clk));
428 		return -EINVAL;
429 	}
430 
431 	if (unlikely(clk->enable_reg))
432 		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
433 		       __clk_get_name(hw->clk));
434 
435 	if (!clkdm_control) {
436 		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
437 		       __func__, __clk_get_name(hw->clk));
438 		return 0;
439 	}
440 
441 	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
442 	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
443 	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);
444 
445 	return ret;
446 }
447 
448 /**
449  * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
450  * @hw: struct clk_hw * of the clock being disabled
451  *
452  * Decrement the usecount of the clockdomain of the clock pointed to
453  * by @hw; if the usecount is 0, the clockdomain will be "disabled."
454  * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
455  * disable function pointer.  No return value.
456  */
457 void omap2_clkops_disable_clkdm(struct clk_hw *hw)
458 {
459 	struct clk_hw_omap *clk;
460 
461 	clk = to_clk_hw_omap(hw);
462 
463 	if (unlikely(!clk->clkdm)) {
464 		pr_err("%s: %s: no clkdm set ?!\n", __func__,
465 		       __clk_get_name(hw->clk));
466 		return;
467 	}
468 
469 	if (unlikely(clk->enable_reg))
470 		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
471 		       __clk_get_name(hw->clk));
472 
473 	if (!clkdm_control) {
474 		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
475 		       __func__, __clk_get_name(hw->clk));
476 		return;
477 	}
478 
479 	clkdm_clk_disable(clk->clkdm, hw->clk);
480 }
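
/*
 * Illustrative sketch (hypothetical "foo" names): clocks that are gated
 * purely by their clockdomain, with no CM_*CLKEN enable bit of their own,
 * can use this pair directly in their struct clk_ops:
 *
 *	static const struct clk_ops foo_clkdm_ck_ops = {
 *		.enable		= &omap2_clkops_enable_clkdm,
 *		.disable	= &omap2_clkops_disable_clkdm,
 *	};
 */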
481 
482 /**
483  * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
484  * @hw: struct clk_hw * to check
485  *
486  * Return 1 if the clock represented by @hw is enabled in the
487  * hardware, or 0 otherwise.  Intended for use in the struct
488  * clk_ops.is_enabled function pointer.
489  */
490 int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
491 {
492 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
493 	u32 v;
494 
495 	v = __raw_readl(clk->enable_reg);
496 
497 	if (clk->flags & INVERT_ENABLE)
498 		v ^= BIT(clk->enable_bit);
499 
500 	v &= BIT(clk->enable_bit);
501 
502 	return v ? 1 : 0;
503 }
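
/*
 * In other words: for a normal clock the enable bit reads 1 when the clock
 * is running, so a set bit reports "enabled".  For an INVERT_ENABLE clock,
 * where the register bit means "disable", the XOR flips the sense first,
 * so a 0 in that bit position is what reports the clock as enabled.
 */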
504 
505 static int __initdata mpurate;
506 
507 /*
508  * By default we use the rate set by the bootloader.
509  * You can override this with mpurate= cmdline option.
510  */
511 static int __init omap_clk_setup(char *str)
512 {
513 	get_option(&str, &mpurate);
514 
515 	if (!mpurate)
516 		return 1;
517 
518 	if (mpurate < 1000)
519 		mpurate *= 1000000;
520 
521 	return 1;
522 }
523 __setup("mpurate=", omap_clk_setup);
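
/*
 * Worked example: booting with "mpurate=600" on the command line stores
 * 600, which is below 1000 and therefore scaled to 600000000 (600 MHz);
 * "mpurate=600000000" is stored unchanged.  The stored rate is applied
 * later by omap2_clk_switch_mpurate_at_boot().
 */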
524 
525 /**
526  * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
527  * @clk: struct clk * to initialize
528  *
529  * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
530  * temporarily for autoidle handling, until this support can be
531  * integrated into the common clock framework code in some way.  No
532  * return value.
533  */
534 void omap2_init_clk_hw_omap_clocks(struct clk *clk)
535 {
536 	struct clk_hw_omap *c;
537 
538 	if (__clk_get_flags(clk) & CLK_IS_BASIC)
539 		return;
540 
541 	c = to_clk_hw_omap(__clk_get_hw(clk));
542 	list_add(&c->node, &clk_hw_omap_clocks);
543 }
544 
545 /**
546  * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
547  * support it
548  *
549  * Enable clock autoidle on all OMAP clocks that have allow_idle
550  * function pointers associated with them.  This function is intended
551  * to be temporary until support for this is added to the common clock
552  * code.  Returns 0.
553  */
554 int omap2_clk_enable_autoidle_all(void)
555 {
556 	struct clk_hw_omap *c;
557 
558 	list_for_each_entry(c, &clk_hw_omap_clocks, node)
559 		if (c->ops && c->ops->allow_idle)
560 			c->ops->allow_idle(c);
561 	return 0;
562 }
563 
564 /**
565  * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
566  * support it
567  *
568  * Disable clock autoidle on all OMAP clocks that have allow_idle
569  * function pointers associated with them.  This function is intended
570  * to be temporary until support for this is added to the common clock
571  * code.  Returns 0.
572  */
573 int omap2_clk_disable_autoidle_all(void)
574 {
575 	struct clk_hw_omap *c;
576 
577 	list_for_each_entry(c, &clk_hw_omap_clocks, node)
578 		if (c->ops && c->ops->deny_idle)
579 			c->ops->deny_idle(c);
580 	return 0;
581 }
582 
583 /**
584  * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
585  * @clk_names: ptr to an array of strings of clock names to enable
586  * @num_clocks: number of clock names in @clk_names
587  *
588  * Prepare and enable a list of clocks, named by @clk_names.  No
589  * return value. XXX Deprecated; only needed until these clocks are
590  * properly claimed and enabled by the drivers or core code that uses
591  * them.  XXX What code disables & calls clk_put on these clocks?
592  */
593 void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
594 {
595 	struct clk *init_clk;
596 	int i;
597 
598 	for (i = 0; i < num_clocks; i++) {
599 		init_clk = clk_get(NULL, clk_names[i]);
600 		clk_prepare_enable(init_clk);
601 	}
602 }
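
/*
 * Illustrative usage (hypothetical clock names): a SoC clock init routine
 * might keep a small table of clocks that must stay enabled and hand it to
 * this helper:
 *
 *	static const char *enable_init_clks[] = {
 *		"sdrc_ick",
 *		"gpmc_fck",
 *	};
 *
 *	omap2_clk_enable_init_clocks(enable_init_clks,
 *				     ARRAY_SIZE(enable_init_clks));
 */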
603 
604 const struct clk_hw_omap_ops clkhwops_wait = {
605 	.find_idlest	= omap2_clk_dflt_find_idlest,
606 	.find_companion	= omap2_clk_dflt_find_companion,
607 };
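
/*
 * Illustrative sketch (hypothetical "foo" names and register/clockdomain
 * values): a clock described in the SoC clock data opts into the default
 * "wait for the module to leave idle" behaviour by pointing its
 * struct clk_hw_omap .ops at clkhwops_wait:
 *
 *	static struct clk_hw_omap foo_fck_hw = {
 *		.hw		= { .init = &foo_fck_init_data },
 *		.ops		= &clkhwops_wait,
 *		.enable_reg	= foo_cm_fclken,	(va of the module's CM_FCLKENx)
 *		.enable_bit	= FOO_EN_SHIFT,
 *		.clkdm_name	= "core_l4_clkdm",
 *	};
 */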
608 #else
609 int omap2_dflt_clk_enable(struct clk *clk)
610 {
611 	u32 v;
612 
613 	if (unlikely(clk->enable_reg == NULL)) {
614 		pr_err("clock.c: Enable for %s without enable code\n",
615 		       clk->name);
616 		return 0; /* REVISIT: -EINVAL */
617 	}
618 
619 	v = __raw_readl(clk->enable_reg);
620 	if (clk->flags & INVERT_ENABLE)
621 		v &= ~(1 << clk->enable_bit);
622 	else
623 		v |= (1 << clk->enable_bit);
624 	__raw_writel(v, clk->enable_reg);
625 	v = __raw_readl(clk->enable_reg); /* OCP barrier */
626 
627 	if (clk->ops->find_idlest)
628 		_omap2_module_wait_ready(clk);
629 
630 	return 0;
631 }
632 
633 void omap2_dflt_clk_disable(struct clk *clk)
634 {
635 	u32 v;
636 
637 	if (!clk->enable_reg) {
638 		/*
639 		 * 'Independent' here refers to a clock which is not
640 		 * controlled by its parent.
641 		 */
642 		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
643 		return;
644 	}
645 
646 	v = __raw_readl(clk->enable_reg);
647 	if (clk->flags & INVERT_ENABLE)
648 		v |= (1 << clk->enable_bit);
649 	else
650 		v &= ~(1 << clk->enable_bit);
651 	__raw_writel(v, clk->enable_reg);
652 	/* No OCP barrier needed here since it is a disable operation */
653 }
654 
655 const struct clkops clkops_omap2_dflt_wait = {
656 	.enable		= omap2_dflt_clk_enable,
657 	.disable	= omap2_dflt_clk_disable,
658 	.find_companion	= omap2_clk_dflt_find_companion,
659 	.find_idlest	= omap2_clk_dflt_find_idlest,
660 };
661 
662 const struct clkops clkops_omap2_dflt = {
663 	.enable		= omap2_dflt_clk_enable,
664 	.disable	= omap2_dflt_clk_disable,
665 };
666 
667 /**
668  * omap2_clk_disable - disable a clock, if the system is not using it
669  * @clk: struct clk * to disable
670  *
671  * Decrements the usecount on struct clk @clk.  If there are no users
672  * left, call the clkops-specific clock disable function to disable it
673  * in hardware.  If the clock is part of a clockdomain (which they all
674  * should be), request that the clockdomain be disabled.  (It too has
675  * a usecount, and so will not be disabled in the hardware until it no
676  * longer has any users.)  If the clock has a parent clock (most of
677  * them do), then call ourselves, recursing on the parent clock.  This
678  * can cause an entire branch of the clock tree to be powered off by
679  * simply disabling one clock.  Intended to be called with the clockfw_lock
680  * spinlock held.  No return value.
681  */
682 void omap2_clk_disable(struct clk *clk)
683 {
684 	if (clk->usecount == 0) {
685 		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
686 		return;
687 	}
688 
689 	pr_debug("clock: %s: decrementing usecount\n", clk->name);
690 
691 	clk->usecount--;
692 
693 	if (clk->usecount > 0)
694 		return;
695 
696 	pr_debug("clock: %s: disabling in hardware\n", clk->name);
697 
698 	if (clk->ops && clk->ops->disable) {
699 		trace_clock_disable(clk->name, 0, smp_processor_id());
700 		clk->ops->disable(clk);
701 	}
702 
703 	if (clkdm_control && clk->clkdm)
704 		clkdm_clk_disable(clk->clkdm, clk);
705 
706 	if (clk->parent)
707 		omap2_clk_disable(clk->parent);
708 }
709 
710 /**
711  * omap2_clk_enable - request that the system enable a clock
712  * @clk: struct clk * to enable
713  *
714  * Increments the usecount on struct clk @clk.  If there were no users
715  * previously, then recurse up the clock tree, enabling all of the
716  * clock's parents and all of the parent clockdomains, and finally,
717  * enabling @clk's clockdomain, and @clk itself.  Intended to be
718  * called with the clockfw_lock spinlock held.  Returns 0 upon success
719  * or a negative error code upon failure.
720  */
721 int omap2_clk_enable(struct clk *clk)
722 {
723 	int ret;
724 
725 	pr_debug("clock: %s: incrementing usecount\n", clk->name);
726 
727 	clk->usecount++;
728 
729 	if (clk->usecount > 1)
730 		return 0;
731 
732 	pr_debug("clock: %s: enabling in hardware\n", clk->name);
733 
734 	if (clk->parent) {
735 		ret = omap2_clk_enable(clk->parent);
736 		if (ret) {
737 			WARN(1, "clock: %s: could not enable parent %s: %d\n",
738 			     clk->name, clk->parent->name, ret);
739 			goto oce_err1;
740 		}
741 	}
742 
743 	if (clkdm_control && clk->clkdm) {
744 		ret = clkdm_clk_enable(clk->clkdm, clk);
745 		if (ret) {
746 			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
747 			     clk->name, clk->clkdm->name, ret);
748 			goto oce_err2;
749 		}
750 	}
751 
752 	if (clk->ops && clk->ops->enable) {
753 		trace_clock_enable(clk->name, 1, smp_processor_id());
754 		ret = clk->ops->enable(clk);
755 		if (ret) {
756 			WARN(1, "clock: %s: could not enable: %d\n",
757 			     clk->name, ret);
758 			goto oce_err3;
759 		}
760 	}
761 
762 	return 0;
763 
764 oce_err3:
765 	if (clkdm_control && clk->clkdm)
766 		clkdm_clk_disable(clk->clkdm, clk);
767 oce_err2:
768 	if (clk->parent)
769 		omap2_clk_disable(clk->parent);
770 oce_err1:
771 	clk->usecount--;
772 
773 	return ret;
774 }
775 
776 /* Given a clock and a rate, apply a clock-specific rounding function */
777 long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
778 {
779 	if (clk->round_rate)
780 		return clk->round_rate(clk, rate);
781 
782 	return clk->rate;
783 }
784 
785 /* Set the clock rate for a clock source */
786 int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
787 {
788 	int ret = -EINVAL;
789 
790 	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
791 
792 	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
793 	if (clk->set_rate) {
794 		trace_clock_set_rate(clk->name, rate, smp_processor_id());
795 		ret = clk->set_rate(clk, rate);
796 	}
797 
798 	return ret;
799 }
800 
801 int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
802 {
803 	if (!clk->clksel)
804 		return -EINVAL;
805 
806 	if (clk->parent == new_parent)
807 		return 0;
808 
809 	return omap2_clksel_set_parent(clk, new_parent);
810 }
811 
812 /*
813  * OMAP2+ clock reset and init functions
814  */
815 
816 #ifdef CONFIG_OMAP_RESET_CLOCKS
817 void omap2_clk_disable_unused(struct clk *clk)
818 {
819 	u32 regval32, v;
820 
821 	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;
822 
823 	regval32 = __raw_readl(clk->enable_reg);
824 	if ((regval32 & (1 << clk->enable_bit)) == v)
825 		return;
826 
827 	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
828 	if (cpu_is_omap34xx()) {
829 		omap2_clk_enable(clk);
830 		omap2_clk_disable(clk);
831 	} else {
832 		clk->ops->disable(clk);
833 	}
834 	if (clk->clkdm != NULL)
835 		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
836 }
837 #endif
838 
839 #endif /* CONFIG_COMMON_CLK */
840 
841 /**
842  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
843  * @mpurate_ck_name: clk name of the clock to change rate
844  *
845  * Change the ARM MPU clock rate to the rate specified on the command
846  * line, if one was specified.  @mpurate_ck_name should be
847  * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
848  * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
849  * handled by the virt_prcm_set clock, but this should be handled by
850  * the OPP layer.  XXX This is intended to be handled by the OPP layer
851  * code in the near future and should be removed from the clock code.
852  * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
853  * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
854  * cannot be found, or 0 upon success.
855  */
856 int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
857 {
858 	struct clk *mpurate_ck;
859 	int r;
860 
861 	if (!mpurate)
862 		return -EINVAL;
863 
864 	mpurate_ck = clk_get(NULL, mpurate_ck_name);
865 	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
866 		return -ENOENT;
867 
868 	r = clk_set_rate(mpurate_ck, mpurate);
869 	if (IS_ERR_VALUE(r)) {
870 		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
871 		     mpurate_ck_name, mpurate, r);
872 		clk_put(mpurate_ck);
873 		return -EINVAL;
874 	}
875 
876 	calibrate_delay();
877 #ifndef CONFIG_COMMON_CLK
878 	recalculate_root_clocks();
879 #endif
880 
881 	clk_put(mpurate_ck);
882 
883 	return 0;
884 }
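
/*
 * Usage sketch: per the kerneldoc above, the SoC clock init code is
 * expected to call this once with the appropriate clock name, e.g.
 *
 *	omap2_clk_switch_mpurate_at_boot("virt_prcm_set");	(OMAP2xxx)
 *	omap2_clk_switch_mpurate_at_boot("dpll1_ck");		(OMAP34xx/36xx)
 *
 * and then call omap2_clk_print_new_rates() to report the resulting rates.
 */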
885 
886 /**
887  * omap2_clk_print_new_rates - print summary of current clock tree rates
888  * @hfclkin_ck_name: clk name for the off-chip HF oscillator
889  * @core_ck_name: clk name for the on-chip CORE_CLK
890  * @mpu_ck_name: clk name for the ARM MPU clock
891  *
892  * Prints a short message to the console with the HFCLKIN oscillator
893  * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
894  * Called by the boot-time MPU rate switching code.   XXX This is intended
895  * to be handled by the OPP layer code in the near future and should be
896  * removed from the clock code.  No return value.
897  */
898 void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
899 				      const char *core_ck_name,
900 				      const char *mpu_ck_name)
901 {
902 	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
903 	unsigned long hfclkin_rate;
904 
905 	mpu_ck = clk_get(NULL, mpu_ck_name);
906 	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
907 		return;
908 
909 	core_ck = clk_get(NULL, core_ck_name);
910 	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
911 		return;
912 
913 	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
914 	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
915 		return;
916 
917 	hfclkin_rate = clk_get_rate(hfclkin_ck);
918 
919 	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
920 		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
921 		(clk_get_rate(core_ck) / 1000000),
922 		(clk_get_rate(mpu_ck) / 1000000));
923 }
924 
925 #ifndef CONFIG_COMMON_CLK
926 /* Common data */
927 int clk_enable(struct clk *clk)
928 {
929 	unsigned long flags;
930 	int ret;
931 
932 	if (clk == NULL || IS_ERR(clk))
933 		return -EINVAL;
934 
935 	spin_lock_irqsave(&clockfw_lock, flags);
936 	ret = omap2_clk_enable(clk);
937 	spin_unlock_irqrestore(&clockfw_lock, flags);
938 
939 	return ret;
940 }
941 EXPORT_SYMBOL(clk_enable);
942 
943 void clk_disable(struct clk *clk)
944 {
945 	unsigned long flags;
946 
947 	if (clk == NULL || IS_ERR(clk))
948 		return;
949 
950 	spin_lock_irqsave(&clockfw_lock, flags);
951 	if (clk->usecount == 0) {
952 		pr_err("Trying to disable clock %s with 0 usecount\n",
953 		       clk->name);
954 		WARN_ON(1);
955 		goto out;
956 	}
957 
958 	omap2_clk_disable(clk);
959 
960 out:
961 	spin_unlock_irqrestore(&clockfw_lock, flags);
962 }
963 EXPORT_SYMBOL(clk_disable);
964 
965 unsigned long clk_get_rate(struct clk *clk)
966 {
967 	unsigned long flags;
968 	unsigned long ret;
969 
970 	if (clk == NULL || IS_ERR(clk))
971 		return 0;
972 
973 	spin_lock_irqsave(&clockfw_lock, flags);
974 	ret = clk->rate;
975 	spin_unlock_irqrestore(&clockfw_lock, flags);
976 
977 	return ret;
978 }
979 EXPORT_SYMBOL(clk_get_rate);
980 
981 /*
982  * Optional clock functions defined in include/linux/clk.h
983  */
984 
985 long clk_round_rate(struct clk *clk, unsigned long rate)
986 {
987 	unsigned long flags;
988 	long ret;
989 
990 	if (clk == NULL || IS_ERR(clk))
991 		return 0;
992 
993 	spin_lock_irqsave(&clockfw_lock, flags);
994 	ret = omap2_clk_round_rate(clk, rate);
995 	spin_unlock_irqrestore(&clockfw_lock, flags);
996 
997 	return ret;
998 }
999 EXPORT_SYMBOL(clk_round_rate);
1000 
1001 int clk_set_rate(struct clk *clk, unsigned long rate)
1002 {
1003 	unsigned long flags;
1004 	int ret = -EINVAL;
1005 
1006 	if (clk == NULL || IS_ERR(clk))
1007 		return ret;
1008 
1009 	spin_lock_irqsave(&clockfw_lock, flags);
1010 	ret = omap2_clk_set_rate(clk, rate);
1011 	if (ret == 0)
1012 		propagate_rate(clk);
1013 	spin_unlock_irqrestore(&clockfw_lock, flags);
1014 
1015 	return ret;
1016 }
1017 EXPORT_SYMBOL(clk_set_rate);
1018 
1019 int clk_set_parent(struct clk *clk, struct clk *parent)
1020 {
1021 	unsigned long flags;
1022 	int ret = -EINVAL;
1023 
1024 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
1025 		return ret;
1026 
1027 	spin_lock_irqsave(&clockfw_lock, flags);
1028 	if (clk->usecount == 0) {
1029 		ret = omap2_clk_set_parent(clk, parent);
1030 		if (ret == 0)
1031 			propagate_rate(clk);
1032 	} else {
1033 		ret = -EBUSY;
1034 	}
1035 	spin_unlock_irqrestore(&clockfw_lock, flags);
1036 
1037 	return ret;
1038 }
1039 EXPORT_SYMBOL(clk_set_parent);
1040 
1041 struct clk *clk_get_parent(struct clk *clk)
1042 {
1043 	return clk->parent;
1044 }
1045 EXPORT_SYMBOL(clk_get_parent);
1046 
1047 /*
1048  * OMAP specific clock functions shared between omap1 and omap2
1049  */
1050 
1051 int __initdata mpurate;
1052 
1053 /*
1054  * By default we use the rate set by the bootloader.
1055  * You can override this with mpurate= cmdline option.
1056  */
1057 static int __init omap_clk_setup(char *str)
1058 {
1059 	get_option(&str, &mpurate);
1060 
1061 	if (!mpurate)
1062 		return 1;
1063 
1064 	if (mpurate < 1000)
1065 		mpurate *= 1000000;
1066 
1067 	return 1;
1068 }
1069 __setup("mpurate=", omap_clk_setup);
1070 
1071 /* Used for clocks that always have the same value as the parent clock */
1072 unsigned long followparent_recalc(struct clk *clk)
1073 {
1074 	return clk->parent->rate;
1075 }
1076 
1077 /*
1078  * Used for clocks that have the same value as the parent clock,
1079  * divided by some factor
1080  */
1081 unsigned long omap_fixed_divisor_recalc(struct clk *clk)
1082 {
1083 	WARN_ON(!clk->fixed_div);
1084 
1085 	return clk->parent->rate / clk->fixed_div;
1086 }
1087 
1088 void clk_reparent(struct clk *child, struct clk *parent)
1089 {
1090 	list_del_init(&child->sibling);
1091 	if (parent)
1092 		list_add(&child->sibling, &parent->children);
1093 	child->parent = parent;
1094 
1095 	/* now do the debugfs renaming to reattach the child
1096 	   to the proper parent */
1097 }
1098 
1099 /* Propagate rate to children */
1100 void propagate_rate(struct clk *tclk)
1101 {
1102 	struct clk *clkp;
1103 
1104 	list_for_each_entry(clkp, &tclk->children, sibling) {
1105 		if (clkp->recalc)
1106 			clkp->rate = clkp->recalc(clkp);
1107 		propagate_rate(clkp);
1108 	}
1109 }
1110 
1111 static LIST_HEAD(root_clks);
1112 
1113 /**
1114  * recalculate_root_clocks - recalculate and propagate all root clocks
1115  *
1116  * Recalculates all root clocks (clocks with no parent), which if the
1117  * clock's .recalc is set correctly, should also propagate their rates.
1118  * Called at init.
1119  */
1120 void recalculate_root_clocks(void)
1121 {
1122 	struct clk *clkp;
1123 
1124 	list_for_each_entry(clkp, &root_clks, sibling) {
1125 		if (clkp->recalc)
1126 			clkp->rate = clkp->recalc(clkp);
1127 		propagate_rate(clkp);
1128 	}
1129 }
1130 
1131 /**
1132  * clk_preinit - initialize any fields in the struct clk before clk init
1133  * @clk: struct clk * to initialize
1134  *
1135  * Initialize any struct clk fields needed before normal clk initialization
1136  * can run.  No return value.
1137  */
1138 void clk_preinit(struct clk *clk)
1139 {
1140 	INIT_LIST_HEAD(&clk->children);
1141 }
1142 
1143 int clk_register(struct clk *clk)
1144 {
1145 	if (clk == NULL || IS_ERR(clk))
1146 		return -EINVAL;
1147 
1148 	/*
1149 	 * trap out already registered clocks
1150 	 */
1151 	if (clk->node.next || clk->node.prev)
1152 		return 0;
1153 
1154 	mutex_lock(&clocks_mutex);
1155 	if (clk->parent)
1156 		list_add(&clk->sibling, &clk->parent->children);
1157 	else
1158 		list_add(&clk->sibling, &root_clks);
1159 
1160 	list_add(&clk->node, &clocks);
1161 	if (clk->init)
1162 		clk->init(clk);
1163 	mutex_unlock(&clocks_mutex);
1164 
1165 	return 0;
1166 }
1167 EXPORT_SYMBOL(clk_register);
1168 
1169 void clk_unregister(struct clk *clk)
1170 {
1171 	if (clk == NULL || IS_ERR(clk))
1172 		return;
1173 
1174 	mutex_lock(&clocks_mutex);
1175 	list_del(&clk->sibling);
1176 	list_del(&clk->node);
1177 	mutex_unlock(&clocks_mutex);
1178 }
1179 EXPORT_SYMBOL(clk_unregister);
1180 
1181 void clk_enable_init_clocks(void)
1182 {
1183 	struct clk *clkp;
1184 
1185 	list_for_each_entry(clkp, &clocks, node)
1186 		if (clkp->flags & ENABLE_ON_INIT)
1187 			clk_enable(clkp);
1188 }
1189 
1190 /**
1191  * omap_clk_get_by_name - locate OMAP struct clk by its name
1192  * @name: name of the struct clk to locate
1193  *
1194  * Locate an OMAP struct clk by its name.  Assumes that struct clk
1195  * names are unique.  Returns NULL if not found or a pointer to the
1196  * struct clk if found.
1197  */
1198 struct clk *omap_clk_get_by_name(const char *name)
1199 {
1200 	struct clk *c;
1201 	struct clk *ret = NULL;
1202 
1203 	mutex_lock(&clocks_mutex);
1204 
1205 	list_for_each_entry(c, &clocks, node) {
1206 		if (!strcmp(c->name, name)) {
1207 			ret = c;
1208 			break;
1209 		}
1210 	}
1211 
1212 	mutex_unlock(&clocks_mutex);
1213 
1214 	return ret;
1215 }
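
/*
 * Illustrative usage (hypothetical clock name):
 *
 *	struct clk *c = omap_clk_get_by_name("foo_fck");
 *
 *	if (!c)
 *		pr_err("clock: foo_fck not found\n");
 *
 * Unlike clk_get(), this looks the clock up purely by its struct clk name
 * and returns NULL rather than an ERR_PTR on failure.
 */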
1216 
1217 int omap_clk_enable_autoidle_all(void)
1218 {
1219 	struct clk *c;
1220 	unsigned long flags;
1221 
1222 	spin_lock_irqsave(&clockfw_lock, flags);
1223 
1224 	list_for_each_entry(c, &clocks, node)
1225 		if (c->ops->allow_idle)
1226 			c->ops->allow_idle(c);
1227 
1228 	spin_unlock_irqrestore(&clockfw_lock, flags);
1229 
1230 	return 0;
1231 }
1232 
1233 int omap_clk_disable_autoidle_all(void)
1234 {
1235 	struct clk *c;
1236 	unsigned long flags;
1237 
1238 	spin_lock_irqsave(&clockfw_lock, flags);
1239 
1240 	list_for_each_entry(c, &clocks, node)
1241 		if (c->ops->deny_idle)
1242 			c->ops->deny_idle(c);
1243 
1244 	spin_unlock_irqrestore(&clockfw_lock, flags);
1245 
1246 	return 0;
1247 }
1248 
1249 /*
1250  * Low level helpers
1251  */
1252 static int clkll_enable_null(struct clk *clk)
1253 {
1254 	return 0;
1255 }
1256 
1257 static void clkll_disable_null(struct clk *clk)
1258 {
1259 }
1260 
1261 const struct clkops clkops_null = {
1262 	.enable		= clkll_enable_null,
1263 	.disable	= clkll_disable_null,
1264 };
1265 
1266 /*
1267  * Dummy clock
1268  *
1269  * Used for clock aliases that are needed on some OMAPs, but not others
1270  */
1271 struct clk dummy_ck = {
1272 	.name	= "dummy",
1273 	.ops	= &clkops_null,
1274 };
1275 
1276 /*
1277  *
1278  */
1279 
1280 #ifdef CONFIG_OMAP_RESET_CLOCKS
1281 /*
1282  * Disable any unused clocks left on by the bootloader
1283  */
1284 static int __init clk_disable_unused(void)
1285 {
1286 	struct clk *ck;
1287 	unsigned long flags;
1288 
1289 	pr_info("clock: disabling unused clocks to save power\n");
1290 
1291 	spin_lock_irqsave(&clockfw_lock, flags);
1292 	list_for_each_entry(ck, &clocks, node) {
1293 		if (ck->ops == &clkops_null)
1294 			continue;
1295 
1296 		if (ck->usecount > 0 || !ck->enable_reg)
1297 			continue;
1298 
1299 		omap2_clk_disable_unused(ck);
1300 	}
1301 	spin_unlock_irqrestore(&clockfw_lock, flags);
1302 
1303 	return 0;
1304 }
1305 late_initcall(clk_disable_unused);
1306 late_initcall(omap_clk_enable_autoidle_all);
1307 #endif
1308 
1309 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
1310 /*
1311  *	debugfs support to trace clock tree hierarchy and attributes
1312  */
1313 
1314 #include <linux/debugfs.h>
1315 #include <linux/seq_file.h>
1316 
1317 static struct dentry *clk_debugfs_root;
1318 
1319 static int clk_dbg_show_summary(struct seq_file *s, void *unused)
1320 {
1321 	struct clk *c;
1322 	struct clk *pa;
1323 
1324 	mutex_lock(&clocks_mutex);
1325 	seq_printf(s, "%-30s %-30s %-10s %s\n",
1326 		   "clock-name", "parent-name", "rate", "use-count");
1327 
1328 	list_for_each_entry(c, &clocks, node) {
1329 		pa = c->parent;
1330 		seq_printf(s, "%-30s %-30s %-10lu %d\n",
1331 			   c->name, pa ? pa->name : "none", c->rate,
1332 			   c->usecount);
1333 	}
1334 	mutex_unlock(&clocks_mutex);
1335 
1336 	return 0;
1337 }
1338 
1339 static int clk_dbg_open(struct inode *inode, struct file *file)
1340 {
1341 	return single_open(file, clk_dbg_show_summary, inode->i_private);
1342 }
1343 
1344 static const struct file_operations debug_clock_fops = {
1345 	.open           = clk_dbg_open,
1346 	.read           = seq_read,
1347 	.llseek         = seq_lseek,
1348 	.release        = single_release,
1349 };
1350 
1351 static int clk_debugfs_register_one(struct clk *c)
1352 {
1353 	int err;
1354 	struct dentry *d;
1355 	struct clk *pa = c->parent;
1356 
1357 	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
1358 	if (!d)
1359 		return -ENOMEM;
1360 	c->dent = d;
1361 
1362 	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
1363 	if (!d) {
1364 		err = -ENOMEM;
1365 		goto err_out;
1366 	}
1367 	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
1368 	if (!d) {
1369 		err = -ENOMEM;
1370 		goto err_out;
1371 	}
1372 	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
1373 	if (!d) {
1374 		err = -ENOMEM;
1375 		goto err_out;
1376 	}
1377 	return 0;
1378 
1379 err_out:
1380 	debugfs_remove_recursive(c->dent);
1381 	return err;
1382 }
1383 
1384 static int clk_debugfs_register(struct clk *c)
1385 {
1386 	int err;
1387 	struct clk *pa = c->parent;
1388 
1389 	if (pa && !pa->dent) {
1390 		err = clk_debugfs_register(pa);
1391 		if (err)
1392 			return err;
1393 	}
1394 
1395 	if (!c->dent) {
1396 		err = clk_debugfs_register_one(c);
1397 		if (err)
1398 			return err;
1399 	}
1400 	return 0;
1401 }
1402 
1403 static int __init clk_debugfs_init(void)
1404 {
1405 	struct clk *c;
1406 	struct dentry *d;
1407 	int err;
1408 
1409 	d = debugfs_create_dir("clock", NULL);
1410 	if (!d)
1411 		return -ENOMEM;
1412 	clk_debugfs_root = d;
1413 
1414 	list_for_each_entry(c, &clocks, node) {
1415 		err = clk_debugfs_register(c);
1416 		if (err)
1417 			goto err_out;
1418 	}
1419 
1420 	d = debugfs_create_file("summary", S_IRUGO,
1421 		d, NULL, &debug_clock_fops);
1422 	if (!d)
1423 		return -ENOMEM;
1424 
1425 	return 0;
1426 err_out:
1427 	debugfs_remove_recursive(clk_debugfs_root);
1428 	return err;
1429 }
1430 late_initcall(clk_debugfs_init);
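
/*
 * With CONFIG_PM_DEBUG and CONFIG_DEBUG_FS enabled, the code above builds a
 * debugfs tree mirroring the clock parent/child hierarchy; assuming debugfs
 * is mounted at /sys/kernel/debug, the layout looks like:
 *
 *	/sys/kernel/debug/clock/summary
 *	/sys/kernel/debug/clock/<parent_ck>/{usecount,rate,flags}
 *	/sys/kernel/debug/clock/<parent_ck>/<child_ck>/...
 */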
1431 
1432 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
1433 #endif /* CONFIG_COMMON_CLK */
1434