xref: /openbmc/linux/arch/arm/mach-omap2/clock.c (revision 23fb8ba3)
1 /*
2  *  linux/arch/arm/mach-omap2/clock.c
3  *
4  *  Copyright (C) 2005-2008 Texas Instruments, Inc.
5  *  Copyright (C) 2004-2010 Nokia Corporation
6  *
7  *  Contacts:
8  *  Richard Woodruff <r-woodruff2@ti.com>
9  *  Paul Walmsley
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  */
15 #undef DEBUG
16 
17 #include <linux/kernel.h>
18 #include <linux/export.h>
19 #include <linux/list.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #ifdef CONFIG_COMMON_CLK
24 #include <linux/clk-provider.h>
25 #else
26 #include <linux/clk.h>
27 #endif
28 #include <linux/io.h>
29 #include <linux/bitops.h>
30 
31 #include <asm/cpu.h>
32 
33 
34 #include <trace/events/power.h>
35 
36 #include "soc.h"
37 #include "clockdomain.h"
38 #include "clock.h"
39 #include "cm.h"
40 #include "cm2xxx.h"
41 #include "cm3xxx.h"
42 #include "cm-regbits-24xx.h"
43 #include "cm-regbits-34xx.h"
44 #include "common.h"
45 
46 /*
47  * MAX_MODULE_ENABLE_WAIT: maximum of number of microseconds to wait
48  * for a module to indicate that it is no longer in idle
49  */
50 #define MAX_MODULE_ENABLE_WAIT		100000
51 
52 u16 cpu_mask;
53 
54 /*
55  * clkdm_control: if true, then when a clock is enabled in the
56  * hardware, its clockdomain will first be enabled; and when a clock
57  * is disabled in the hardware, its clockdomain will be disabled
58  * afterwards.
59  */
60 static bool clkdm_control = true;
61 
62 static LIST_HEAD(clocks);
63 static DEFINE_MUTEX(clocks_mutex);
64 #ifndef CONFIG_COMMON_CLK
65 static DEFINE_SPINLOCK(clockfw_lock);
66 #endif
67 
68 #ifdef CONFIG_COMMON_CLK
69 static LIST_HEAD(clk_hw_omap_clocks);
70 
71 /*
72  * Used for clocks that have the same value as the parent clock,
73  * divided by some factor
74  */
75 unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
76 		unsigned long parent_rate)
77 {
78 	struct clk_hw_omap *oclk;
79 
80 	if (!hw) {
81 		pr_warn("%s: hw is NULL\n", __func__);
82 		return -EINVAL;
83 	}
84 
85 	oclk = to_clk_hw_omap(hw);
86 
87 	WARN_ON(!oclk->fixed_div);
88 
89 	return parent_rate / oclk->fixed_div;
90 }
91 #endif
92 
93 /*
94  * OMAP2+ specific clock functions
95  */
96 
97 /* Private functions */
98 
99 
100 /**
101  * _wait_idlest_generic - wait for a module to leave the idle state
102  * @reg: virtual address of module IDLEST register
103  * @mask: value to mask against to determine if the module is active
104  * @idlest: idle state indicator (0 or 1) for the clock
105  * @name: name of the clock (for printk)
106  *
107  * Wait for a module to leave idle, where its idle-status register is
108  * not inside the CM module.  Returns 1 if the module left idle
109  * promptly, or 0 if the module did not leave idle before the timeout
110  * elapsed.  XXX Deprecated - should be moved into drivers for the
111  * individual IP block that the IDLEST register exists in.
112  */
113 static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
114 				const char *name)
115 {
116 	int i = 0, ena = 0;
117 
118 	ena = (idlest) ? 0 : mask;
119 
120 	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
121 			  MAX_MODULE_ENABLE_WAIT, i);
122 
123 	if (i < MAX_MODULE_ENABLE_WAIT)
124 		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
125 			 name, i);
126 	else
127 		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
128 		       name, MAX_MODULE_ENABLE_WAIT);
129 
130 	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
131 };
132 
133 /**
134  * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
135  * @clk: struct clk * belonging to the module
136  *
137  * If the necessary clocks for the OMAP hardware IP block that
138  * corresponds to clock @clk are enabled, then wait for the module to
139  * indicate readiness (i.e., to leave IDLE).  This code does not
140  * belong in the clock code and will be moved in the medium term to
141  * module-dependent code.  No return value.
142  */
143 #ifdef CONFIG_COMMON_CLK
144 static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
145 #else
146 static void _omap2_module_wait_ready(struct clk *clk)
147 #endif
148 {
149 	void __iomem *companion_reg, *idlest_reg;
150 	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
151 	s16 prcm_mod;
152 	int r;
153 
154 	/* Not all modules have multiple clocks that their IDLEST depends on */
155 	if (clk->ops->find_companion) {
156 		clk->ops->find_companion(clk, &companion_reg, &other_bit);
157 		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
158 			return;
159 	}
160 
161 	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
162 	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
163 	if (r) {
164 		/* IDLEST register not in the CM module */
165 		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
166 #ifdef CONFIG_COMMON_CLK
167 				     __clk_get_name(clk->hw.clk));
168 #else
169 				     clk->name);
170 #endif
171 	} else {
172 		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
173 	};
174 }
175 
176 /* Public functions */
177 
178 /**
179  * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
180  * @clk: OMAP clock struct ptr to use
181  *
182  * Convert a clockdomain name stored in a struct clk 'clk' into a
183  * clockdomain pointer, and save it into the struct clk.  Intended to be
184  * called during clk_register().  No return value.
185  */
#ifdef CONFIG_COMMON_CLK
void omap2_init_clk_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
#else
void omap2_init_clk_clkdm(struct clk *clk)
{
#endif
	struct clockdomain *clkdm;
	const char *clk_name;

	/* Clock does not belong to any clockdomain: nothing to resolve */
	if (!clk->clkdm_name)
		return;

#ifdef CONFIG_COMMON_CLK
	clk_name = __clk_get_name(hw->clk);
#else
	clk_name = __clk_get_name(clk);
#endif

	/* Resolve the name to a clockdomain pointer and cache it */
	clkdm = clkdm_lookup(clk->clkdm_name);
	if (clkdm) {
		pr_debug("clock: associated clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
		clk->clkdm = clkdm;
	} else {
		/* Lookup failure is non-fatal; clk->clkdm stays unset */
		pr_debug("clock: could not associate clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
	}
}
216 
217 /**
218  * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
219  *
220  * Prevent the OMAP clock code from calling into the clockdomain code
221  * when a hardware clock in that clockdomain is enabled or disabled.
222  * Intended to be called at init time from omap*_clk_init().  No
223  * return value.
224  */
void __init omap2_clk_disable_clkdm_control(void)
{
	/* Checked by the clk enable/disable paths before touching clkdms */
	clkdm_control = false;
}
229 
230 /**
231  * omap2_clk_dflt_find_companion - find companion clock to @clk
232  * @clk: struct clk * to find the companion clock of
233  * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
234  * @other_bit: u8 ** to return the companion clock bit shift in
235  *
236  * Note: We don't need special code here for INVERT_ENABLE for the
237  * time being since INVERT_ENABLE only applies to clocks enabled by
238  * CM_CLKEN_PLL
239  *
240  * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
241  * just a matter of XORing the bits.
242  *
243  * Some clocks don't have companion clocks.  For example, modules with
244  * only an interface clock (such as MAILBOXES) don't have a companion
245  * clock.  Right now, this code relies on the hardware exporting a bit
246  * in the correct companion register that indicates that the
247  * nonexistent 'companion clock' is active.  Future patches will
248  * associate this type of code with per-module data structures to
249  * avoid this issue, and remove the casts.  No return value.
250  */
251 #ifdef CONFIG_COMMON_CLK
252 void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
253 #else
254 void omap2_clk_dflt_find_companion(struct clk *clk,
255 #endif
256 			void __iomem **other_reg, u8 *other_bit)
257 {
258 	u32 r;
259 
260 	/*
261 	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
262 	 * it's just a matter of XORing the bits.
263 	 */
264 	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
265 
266 	*other_reg = (__force void __iomem *)r;
267 	*other_bit = clk->enable_bit;
268 }
269 
270 /**
271  * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
272  * @clk: struct clk * to find IDLEST info for
273  * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
274  * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
275  * @idlest_val: u8 * to return the idle status indicator
276  *
277  * Return the CM_IDLEST register address and bit shift corresponding
278  * to the module that "owns" this clock.  This default code assumes
279  * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
280  * the IDLEST register address ID corresponds to the CM_*CLKEN
281  * register address ID (e.g., that CM_FCLKEN2 corresponds to
282  * CM_IDLEST2).  This is not true for all modules.  No return value.
283  */
284 #ifdef CONFIG_COMMON_CLK
285 void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
286 #else
287 void omap2_clk_dflt_find_idlest(struct clk *clk,
288 #endif
289 		void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
290 {
291 	u32 r;
292 
293 	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
294 	*idlest_reg = (__force void __iomem *)r;
295 	*idlest_bit = clk->enable_bit;
296 
297 	/*
298 	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
299 	 * 34xx reverses this, just to keep us on our toes
300 	 * AM35xx uses both, depending on the module.
301 	 */
302 	if (cpu_is_omap24xx())
303 		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
304 	else if (cpu_is_omap34xx())
305 		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
306 	else
307 		BUG();
308 
309 }
310 
311 #ifdef CONFIG_COMMON_CLK
312 /**
313  * omap2_dflt_clk_enable - enable a clock in the hardware
314  * @hw: struct clk_hw * of the clock to enable
315  *
316  * Enable the clock @hw in the hardware.  We first call into the OMAP
317  * clockdomain code to "enable" the corresponding clockdomain if this
318  * is the first enabled user of the clockdomain.  Then program the
319  * hardware to enable the clock.  Then wait for the IP block that uses
320  * this clock to leave idle (if applicable).  Returns the error value
321  * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
322  * if @hw has a null clock enable_reg, or zero upon success.
323  */
324 int omap2_dflt_clk_enable(struct clk_hw *hw)
325 {
326 	struct clk_hw_omap *clk;
327 	u32 v;
328 	int ret = 0;
329 
330 	clk = to_clk_hw_omap(hw);
331 
332 	if (clkdm_control && clk->clkdm) {
333 		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
334 		if (ret) {
335 			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
336 			     __func__, __clk_get_name(hw->clk),
337 			     clk->clkdm->name, ret);
338 			return ret;
339 		}
340 	}
341 
342 	if (unlikely(clk->enable_reg == NULL)) {
343 		pr_err("%s: %s missing enable_reg\n", __func__,
344 		       __clk_get_name(hw->clk));
345 		ret = -EINVAL;
346 		goto err;
347 	}
348 
349 	/* FIXME should not have INVERT_ENABLE bit here */
350 	v = __raw_readl(clk->enable_reg);
351 	if (clk->flags & INVERT_ENABLE)
352 		v &= ~(1 << clk->enable_bit);
353 	else
354 		v |= (1 << clk->enable_bit);
355 	__raw_writel(v, clk->enable_reg);
356 	v = __raw_readl(clk->enable_reg); /* OCP barrier */
357 
358 	if (clk->ops && clk->ops->find_idlest)
359 		_omap2_module_wait_ready(clk);
360 
361 	return 0;
362 
363 err:
364 	if (clkdm_control && clk->clkdm)
365 		clkdm_clk_disable(clk->clkdm, hw->clk);
366 	return ret;
367 }
368 
369 /**
370  * omap2_dflt_clk_disable - disable a clock in the hardware
371  * @hw: struct clk_hw * of the clock to disable
372  *
373  * Disable the clock @hw in the hardware, and call into the OMAP
374  * clockdomain code to "disable" the corresponding clockdomain if all
375  * clocks/hwmods in that clockdomain are now disabled.  No return
376  * value.
377  */
378 void omap2_dflt_clk_disable(struct clk_hw *hw)
379 {
380 	struct clk_hw_omap *clk;
381 	u32 v;
382 
383 	clk = to_clk_hw_omap(hw);
384 	if (!clk->enable_reg) {
385 		/*
386 		 * 'independent' here refers to a clock which is not
387 		 * controlled by its parent.
388 		 */
389 		pr_err("%s: independent clock %s has no enable_reg\n",
390 		       __func__, __clk_get_name(hw->clk));
391 		return;
392 	}
393 
394 	v = __raw_readl(clk->enable_reg);
395 	if (clk->flags & INVERT_ENABLE)
396 		v |= (1 << clk->enable_bit);
397 	else
398 		v &= ~(1 << clk->enable_bit);
399 	__raw_writel(v, clk->enable_reg);
400 	/* No OCP barrier needed here since it is a disable operation */
401 
402 	if (clkdm_control && clk->clkdm)
403 		clkdm_clk_disable(clk->clkdm, hw->clk);
404 }
405 
406 /**
407  * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
408  * @hw: struct clk_hw * of the clock being enabled
409  *
410  * Increment the usecount of the clockdomain of the clock pointed to
411  * by @hw; if the usecount is 1, the clockdomain will be "enabled."
412  * Only needed for clocks that don't use omap2_dflt_clk_enable() as
413  * their enable function pointer.  Passes along the return value of
414  * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
415  * clockdomain, or 0 if clock framework-based clockdomain control is
416  * not implemented.
417  */
418 int omap2_clkops_enable_clkdm(struct clk_hw *hw)
419 {
420 	struct clk_hw_omap *clk;
421 	int ret = 0;
422 
423 	clk = to_clk_hw_omap(hw);
424 
425 	if (unlikely(!clk->clkdm)) {
426 		pr_err("%s: %s: no clkdm set ?!\n", __func__,
427 		       __clk_get_name(hw->clk));
428 		return -EINVAL;
429 	}
430 
431 	if (unlikely(clk->enable_reg))
432 		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
433 		       __clk_get_name(hw->clk));
434 
435 	if (!clkdm_control) {
436 		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
437 		       __func__, __clk_get_name(hw->clk));
438 		return 0;
439 	}
440 
441 	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
442 	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
443 	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);
444 
445 	return ret;
446 }
447 
448 /**
449  * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
450  * @hw: struct clk_hw * of the clock being disabled
451  *
452  * Decrement the usecount of the clockdomain of the clock pointed to
453  * by @hw; if the usecount is 0, the clockdomain will be "disabled."
454  * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
455  * disable function pointer.  No return value.
456  */
457 void omap2_clkops_disable_clkdm(struct clk_hw *hw)
458 {
459 	struct clk_hw_omap *clk;
460 
461 	clk = to_clk_hw_omap(hw);
462 
463 	if (unlikely(!clk->clkdm)) {
464 		pr_err("%s: %s: no clkdm set ?!\n", __func__,
465 		       __clk_get_name(hw->clk));
466 		return;
467 	}
468 
469 	if (unlikely(clk->enable_reg))
470 		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
471 		       __clk_get_name(hw->clk));
472 
473 	if (!clkdm_control) {
474 		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
475 		       __func__, __clk_get_name(hw->clk));
476 		return;
477 	}
478 
479 	clkdm_clk_disable(clk->clkdm, hw->clk);
480 }
481 
482 /**
483  * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
484  * @hw: struct clk_hw * to check
485  *
486  * Return 1 if the clock represented by @hw is enabled in the
487  * hardware, or 0 otherwise.  Intended for use in the struct
488  * clk_ops.is_enabled function pointer.
489  */
490 int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
491 {
492 	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
493 	u32 v;
494 
495 	v = __raw_readl(clk->enable_reg);
496 
497 	if (clk->flags & INVERT_ENABLE)
498 		v ^= BIT(clk->enable_bit);
499 
500 	v &= BIT(clk->enable_bit);
501 
502 	return v ? 1 : 0;
503 }
504 
505 static int __initdata mpurate;
506 
507 /*
508  * By default we use the rate set by the bootloader.
509  * You can override this with mpurate= cmdline option.
510  */
511 static int __init omap_clk_setup(char *str)
512 {
513 	get_option(&str, &mpurate);
514 
515 	if (!mpurate)
516 		return 1;
517 
518 	if (mpurate < 1000)
519 		mpurate *= 1000000;
520 
521 	return 1;
522 }
523 __setup("mpurate=", omap_clk_setup);
524 
525 /**
526  * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
527  * @clk: struct clk * to initialize
528  *
529  * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
530  * temporarily for autoidle handling, until this support can be
531  * integrated into the common clock framework code in some way.  No
532  * return value.
533  */
534 void omap2_init_clk_hw_omap_clocks(struct clk *clk)
535 {
536 	struct clk_hw_omap *c;
537 
538 	if (__clk_get_flags(clk) & CLK_IS_BASIC)
539 		return;
540 
541 	c = to_clk_hw_omap(__clk_get_hw(clk));
542 	list_add(&c->node, &clk_hw_omap_clocks);
543 }
544 
545 /**
546  * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
547  * support it
548  *
549  * Enable clock autoidle on all OMAP clocks that have allow_idle
550  * function pointers associated with them.  This function is intended
551  * to be temporary until support for this is added to the common clock
552  * code.  Returns 0.
553  */
554 int omap2_clk_enable_autoidle_all(void)
555 {
556 	struct clk_hw_omap *c;
557 
558 	list_for_each_entry(c, &clk_hw_omap_clocks, node)
559 		if (c->ops && c->ops->allow_idle)
560 			c->ops->allow_idle(c);
561 	return 0;
562 }
563 
564 /**
565  * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
566  * support it
567  *
568  * Disable clock autoidle on all OMAP clocks that have allow_idle
569  * function pointers associated with them.  This function is intended
570  * to be temporary until support for this is added to the common clock
571  * code.  Returns 0.
572  */
573 int omap2_clk_disable_autoidle_all(void)
574 {
575 	struct clk_hw_omap *c;
576 
577 	list_for_each_entry(c, &clk_hw_omap_clocks, node)
578 		if (c->ops && c->ops->deny_idle)
579 			c->ops->deny_idle(c);
580 	return 0;
581 }
582 
/* Default hwops for clocks whose IP module must be polled for readiness */
const struct clk_hw_omap_ops clkhwops_wait = {
	.find_idlest	= omap2_clk_dflt_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
587 #else
588 int omap2_dflt_clk_enable(struct clk *clk)
589 {
590 	u32 v;
591 
592 	if (unlikely(clk->enable_reg == NULL)) {
593 		pr_err("clock.c: Enable for %s without enable code\n",
594 		       clk->name);
595 		return 0; /* REVISIT: -EINVAL */
596 	}
597 
598 	v = __raw_readl(clk->enable_reg);
599 	if (clk->flags & INVERT_ENABLE)
600 		v &= ~(1 << clk->enable_bit);
601 	else
602 		v |= (1 << clk->enable_bit);
603 	__raw_writel(v, clk->enable_reg);
604 	v = __raw_readl(clk->enable_reg); /* OCP barrier */
605 
606 	if (clk->ops->find_idlest)
607 		_omap2_module_wait_ready(clk);
608 
609 	return 0;
610 }
611 
612 void omap2_dflt_clk_disable(struct clk *clk)
613 {
614 	u32 v;
615 
616 	if (!clk->enable_reg) {
617 		/*
618 		 * 'Independent' here refers to a clock which is not
619 		 * controlled by its parent.
620 		 */
621 		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
622 		return;
623 	}
624 
625 	v = __raw_readl(clk->enable_reg);
626 	if (clk->flags & INVERT_ENABLE)
627 		v |= (1 << clk->enable_bit);
628 	else
629 		v &= ~(1 << clk->enable_bit);
630 	__raw_writel(v, clk->enable_reg);
631 	/* No OCP barrier needed here since it is a disable operation */
632 }
633 
/* Default clkops that also poll the IP module's IDLEST after enable */
const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_companion	= omap2_clk_dflt_find_companion,
	.find_idlest	= omap2_clk_dflt_find_idlest,
};

/* Default clkops without module-ready polling */
const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};
645 
646 /**
647  * omap2_clk_disable - disable a clock, if the system is not using it
648  * @clk: struct clk * to disable
649  *
650  * Decrements the usecount on struct clk @clk.  If there are no users
651  * left, call the clkops-specific clock disable function to disable it
652  * in hardware.  If the clock is part of a clockdomain (which they all
653  * should be), request that the clockdomain be disabled.  (It too has
654  * a usecount, and so will not be disabled in the hardware until it no
655  * longer has any users.)  If the clock has a parent clock (most of
656  * them do), then call ourselves, recursing on the parent clock.  This
657  * can cause an entire branch of the clock tree to be powered off by
658  * simply disabling one clock.  Intended to be called with the clockfw_lock
659  * spinlock held.  No return value.
660  */
void omap2_clk_disable(struct clk *clk)
{
	/* Unbalanced disable: warn and bail rather than underflowing */
	if (clk->usecount == 0) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	clk->usecount--;

	/* Other users remain; leave the hardware alone */
	if (clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	/*
	 * Tear down in the reverse order of omap2_clk_enable():
	 * hardware clock first, then its clockdomain, then the parent
	 * (recursively, so a whole branch can power off).
	 */
	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	if (clk->parent)
		omap2_clk_disable(clk->parent);
}
688 
689 /**
690  * omap2_clk_enable - request that the system enable a clock
691  * @clk: struct clk * to enable
692  *
693  * Increments the usecount on struct clk @clk.  If there were no users
694  * previously, then recurse up the clock tree, enabling all of the
695  * clock's parents and all of the parent clockdomains, and finally,
696  * enabling @clk's clockdomain, and @clk itself.  Intended to be
697  * called with the clockfw_lock spinlock held.  Returns 0 upon success
698  * or a negative error code upon failure.
699  */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	/* Already enabled by an earlier user; nothing to do in hardware */
	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	/* Enable order: parent (recursively), then clockdomain, then clk */
	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
			     clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

	/* Unwind in strict reverse order of the enables above */
oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
754 
755 /* Given a clock and a rate apply a clock specific rounding function */
756 long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
757 {
758 	if (clk->round_rate)
759 		return clk->round_rate(clk, rate);
760 
761 	return clk->rate;
762 }
763 
764 /* Set the clock rate for a clock source */
765 int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
766 {
767 	int ret = -EINVAL;
768 
769 	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
770 
771 	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
772 	if (clk->set_rate) {
773 		trace_clock_set_rate(clk->name, rate, smp_processor_id());
774 		ret = clk->set_rate(clk, rate);
775 	}
776 
777 	return ret;
778 }
779 
780 int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
781 {
782 	if (!clk->clksel)
783 		return -EINVAL;
784 
785 	if (clk->parent == new_parent)
786 		return 0;
787 
788 	return omap2_clksel_set_parent(clk, new_parent);
789 }
790 
791 /*
792  * OMAP2+ clock reset and init functions
793  */
794 
795 #ifdef CONFIG_OMAP_RESET_CLOCKS
796 void omap2_clk_disable_unused(struct clk *clk)
797 {
798 	u32 regval32, v;
799 
800 	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;
801 
802 	regval32 = __raw_readl(clk->enable_reg);
803 	if ((regval32 & (1 << clk->enable_bit)) == v)
804 		return;
805 
806 	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
807 	if (cpu_is_omap34xx()) {
808 		omap2_clk_enable(clk);
809 		omap2_clk_disable(clk);
810 	} else {
811 		clk->ops->disable(clk);
812 	}
813 	if (clk->clkdm != NULL)
814 		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
815 }
816 #endif
817 
818 #endif /* CONFIG_COMMON_CLK */
819 
820 /**
821  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
822  * @mpurate_ck_name: clk name of the clock to change rate
823  *
824  * Change the ARM MPU clock rate to the rate specified on the command
825  * line, if one was specified.  @mpurate_ck_name should be
826  * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
827  * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
828  * handled by the virt_prcm_set clock, but this should be handled by
829  * the OPP layer.  XXX This is intended to be handled by the OPP layer
830  * code in the near future and should be removed from the clock code.
831  * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
832  * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
833  * cannot be found, or 0 upon success.
834  */
835 int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
836 {
837 	struct clk *mpurate_ck;
838 	int r;
839 
840 	if (!mpurate)
841 		return -EINVAL;
842 
843 	mpurate_ck = clk_get(NULL, mpurate_ck_name);
844 	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
845 		return -ENOENT;
846 
847 	r = clk_set_rate(mpurate_ck, mpurate);
848 	if (IS_ERR_VALUE(r)) {
849 		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
850 		     mpurate_ck_name, mpurate, r);
851 		clk_put(mpurate_ck);
852 		return -EINVAL;
853 	}
854 
855 	calibrate_delay();
856 #ifndef CONFIG_COMMON_CLK
857 	recalculate_root_clocks();
858 #endif
859 
860 	clk_put(mpurate_ck);
861 
862 	return 0;
863 }
864 
865 /**
866  * omap2_clk_print_new_rates - print summary of current clock tree rates
867  * @hfclkin_ck_name: clk name for the off-chip HF oscillator
868  * @core_ck_name: clk name for the on-chip CORE_CLK
869  * @mpu_ck_name: clk name for the ARM MPU clock
870  *
871  * Prints a short message to the console with the HFCLKIN oscillator
872  * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
873  * Called by the boot-time MPU rate switching code.   XXX This is intended
874  * to be handled by the OPP layer code in the near future and should be
875  * removed from the clock code.  No return value.
876  */
877 void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
878 				      const char *core_ck_name,
879 				      const char *mpu_ck_name)
880 {
881 	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
882 	unsigned long hfclkin_rate;
883 
884 	mpu_ck = clk_get(NULL, mpu_ck_name);
885 	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
886 		return;
887 
888 	core_ck = clk_get(NULL, core_ck_name);
889 	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
890 		return;
891 
892 	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
893 	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
894 		return;
895 
896 	hfclkin_rate = clk_get_rate(hfclkin_ck);
897 
898 	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
899 		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
900 		(clk_get_rate(core_ck) / 1000000),
901 		(clk_get_rate(mpu_ck) / 1000000));
902 }
903 
904 #ifndef CONFIG_COMMON_CLK
905 /* Common data */
906 int clk_enable(struct clk *clk)
907 {
908 	unsigned long flags;
909 	int ret;
910 
911 	if (clk == NULL || IS_ERR(clk))
912 		return -EINVAL;
913 
914 	spin_lock_irqsave(&clockfw_lock, flags);
915 	ret = omap2_clk_enable(clk);
916 	spin_unlock_irqrestore(&clockfw_lock, flags);
917 
918 	return ret;
919 }
920 EXPORT_SYMBOL(clk_enable);
921 
922 void clk_disable(struct clk *clk)
923 {
924 	unsigned long flags;
925 
926 	if (clk == NULL || IS_ERR(clk))
927 		return;
928 
929 	spin_lock_irqsave(&clockfw_lock, flags);
930 	if (clk->usecount == 0) {
931 		pr_err("Trying disable clock %s with 0 usecount\n",
932 		       clk->name);
933 		WARN_ON(1);
934 		goto out;
935 	}
936 
937 	omap2_clk_disable(clk);
938 
939 out:
940 	spin_unlock_irqrestore(&clockfw_lock, flags);
941 }
942 EXPORT_SYMBOL(clk_disable);
943 
944 unsigned long clk_get_rate(struct clk *clk)
945 {
946 	unsigned long flags;
947 	unsigned long ret;
948 
949 	if (clk == NULL || IS_ERR(clk))
950 		return 0;
951 
952 	spin_lock_irqsave(&clockfw_lock, flags);
953 	ret = clk->rate;
954 	spin_unlock_irqrestore(&clockfw_lock, flags);
955 
956 	return ret;
957 }
958 EXPORT_SYMBOL(clk_get_rate);
959 
960 /*
961  * Optional clock functions defined in include/linux/clk.h
962  */
963 
964 long clk_round_rate(struct clk *clk, unsigned long rate)
965 {
966 	unsigned long flags;
967 	long ret;
968 
969 	if (clk == NULL || IS_ERR(clk))
970 		return 0;
971 
972 	spin_lock_irqsave(&clockfw_lock, flags);
973 	ret = omap2_clk_round_rate(clk, rate);
974 	spin_unlock_irqrestore(&clockfw_lock, flags);
975 
976 	return ret;
977 }
978 EXPORT_SYMBOL(clk_round_rate);
979 
980 int clk_set_rate(struct clk *clk, unsigned long rate)
981 {
982 	unsigned long flags;
983 	int ret = -EINVAL;
984 
985 	if (clk == NULL || IS_ERR(clk))
986 		return ret;
987 
988 	spin_lock_irqsave(&clockfw_lock, flags);
989 	ret = omap2_clk_set_rate(clk, rate);
990 	if (ret == 0)
991 		propagate_rate(clk);
992 	spin_unlock_irqrestore(&clockfw_lock, flags);
993 
994 	return ret;
995 }
996 EXPORT_SYMBOL(clk_set_rate);
997 
998 int clk_set_parent(struct clk *clk, struct clk *parent)
999 {
1000 	unsigned long flags;
1001 	int ret = -EINVAL;
1002 
1003 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
1004 		return ret;
1005 
1006 	spin_lock_irqsave(&clockfw_lock, flags);
1007 	if (clk->usecount == 0) {
1008 		ret = omap2_clk_set_parent(clk, parent);
1009 		if (ret == 0)
1010 			propagate_rate(clk);
1011 	} else {
1012 		ret = -EBUSY;
1013 	}
1014 	spin_unlock_irqrestore(&clockfw_lock, flags);
1015 
1016 	return ret;
1017 }
1018 EXPORT_SYMBOL(clk_set_parent);
1019 
1020 struct clk *clk_get_parent(struct clk *clk)
1021 {
1022 	return clk->parent;
1023 }
1024 EXPORT_SYMBOL(clk_get_parent);
1025 
1026 /*
1027  * OMAP specific clock functions shared between omap1 and omap2
1028  */
1029 
1030 int __initdata mpurate;
1031 
1032 /*
1033  * By default we use the rate set by the bootloader.
1034  * You can override this with mpurate= cmdline option.
1035  */
1036 static int __init omap_clk_setup(char *str)
1037 {
1038 	get_option(&str, &mpurate);
1039 
1040 	if (!mpurate)
1041 		return 1;
1042 
1043 	if (mpurate < 1000)
1044 		mpurate *= 1000000;
1045 
1046 	return 1;
1047 }
1048 __setup("mpurate=", omap_clk_setup);
1049 
1050 /* Used for clocks that always have same value as the parent clock */
/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	/* Assumes clk->parent is non-NULL for all clocks using this recalc */
	return clk->parent->rate;
}
1055 
1056 /*
1057  * Used for clocks that have the same value as the parent clock,
1058  * divided by some factor
1059  */
1060 unsigned long omap_fixed_divisor_recalc(struct clk *clk)
1061 {
1062 	WARN_ON(!clk->fixed_div);
1063 
1064 	return clk->parent->rate / clk->fixed_div;
1065 }
1066 
1067 void clk_reparent(struct clk *child, struct clk *parent)
1068 {
1069 	list_del_init(&child->sibling);
1070 	if (parent)
1071 		list_add(&child->sibling, &parent->children);
1072 	child->parent = parent;
1073 
1074 	/* now do the debugfs renaming to reattach the child
1075 	   to the proper parent */
1076 }
1077 
1078 /* Propagate rate to children */
1079 void propagate_rate(struct clk *tclk)
1080 {
1081 	struct clk *clkp;
1082 
1083 	list_for_each_entry(clkp, &tclk->children, sibling) {
1084 		if (clkp->recalc)
1085 			clkp->rate = clkp->recalc(clkp);
1086 		propagate_rate(clkp);
1087 	}
1088 }
1089 
/* All clocks with no parent: the anchors of the clock tree */
static LIST_HEAD(root_clks);
1091 
1092 /**
1093  * recalculate_root_clocks - recalculate and propagate all root clocks
1094  *
1095  * Recalculates all root clocks (clocks with no parent), which if the
1096  * clock's .recalc is set correctly, should also propagate their rates.
1097  * Called at init.
1098  */
1099 void recalculate_root_clocks(void)
1100 {
1101 	struct clk *clkp;
1102 
1103 	list_for_each_entry(clkp, &root_clks, sibling) {
1104 		if (clkp->recalc)
1105 			clkp->rate = clkp->recalc(clkp);
1106 		propagate_rate(clkp);
1107 	}
1108 }
1109 
/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	/* clk_register() links child clocks onto this list via ->sibling */
	INIT_LIST_HEAD(&clk->children);
}
1121 
1122 int clk_register(struct clk *clk)
1123 {
1124 	if (clk == NULL || IS_ERR(clk))
1125 		return -EINVAL;
1126 
1127 	/*
1128 	 * trap out already registered clocks
1129 	 */
1130 	if (clk->node.next || clk->node.prev)
1131 		return 0;
1132 
1133 	mutex_lock(&clocks_mutex);
1134 	if (clk->parent)
1135 		list_add(&clk->sibling, &clk->parent->children);
1136 	else
1137 		list_add(&clk->sibling, &root_clks);
1138 
1139 	list_add(&clk->node, &clocks);
1140 	if (clk->init)
1141 		clk->init(clk);
1142 	mutex_unlock(&clocks_mutex);
1143 
1144 	return 0;
1145 }
1146 EXPORT_SYMBOL(clk_register);
1147 
1148 void clk_unregister(struct clk *clk)
1149 {
1150 	if (clk == NULL || IS_ERR(clk))
1151 		return;
1152 
1153 	mutex_lock(&clocks_mutex);
1154 	list_del(&clk->sibling);
1155 	list_del(&clk->node);
1156 	mutex_unlock(&clocks_mutex);
1157 }
1158 EXPORT_SYMBOL(clk_unregister);
1159 
1160 void clk_enable_init_clocks(void)
1161 {
1162 	struct clk *clkp;
1163 
1164 	list_for_each_entry(clkp, &clocks, node)
1165 		if (clkp->flags & ENABLE_ON_INIT)
1166 			clk_enable(clkp);
1167 }
1168 
1169 /**
1170  * omap_clk_get_by_name - locate OMAP struct clk by its name
1171  * @name: name of the struct clk to locate
1172  *
1173  * Locate an OMAP struct clk by its name.  Assumes that struct clk
1174  * names are unique.  Returns NULL if not found or a pointer to the
1175  * struct clk if found.
1176  */
1177 struct clk *omap_clk_get_by_name(const char *name)
1178 {
1179 	struct clk *c;
1180 	struct clk *ret = NULL;
1181 
1182 	mutex_lock(&clocks_mutex);
1183 
1184 	list_for_each_entry(c, &clocks, node) {
1185 		if (!strcmp(c->name, name)) {
1186 			ret = c;
1187 			break;
1188 		}
1189 	}
1190 
1191 	mutex_unlock(&clocks_mutex);
1192 
1193 	return ret;
1194 }
1195 
1196 int omap_clk_enable_autoidle_all(void)
1197 {
1198 	struct clk *c;
1199 	unsigned long flags;
1200 
1201 	spin_lock_irqsave(&clockfw_lock, flags);
1202 
1203 	list_for_each_entry(c, &clocks, node)
1204 		if (c->ops->allow_idle)
1205 			c->ops->allow_idle(c);
1206 
1207 	spin_unlock_irqrestore(&clockfw_lock, flags);
1208 
1209 	return 0;
1210 }
1211 
1212 int omap_clk_disable_autoidle_all(void)
1213 {
1214 	struct clk *c;
1215 	unsigned long flags;
1216 
1217 	spin_lock_irqsave(&clockfw_lock, flags);
1218 
1219 	list_for_each_entry(c, &clocks, node)
1220 		if (c->ops->deny_idle)
1221 			c->ops->deny_idle(c);
1222 
1223 	spin_unlock_irqrestore(&clockfw_lock, flags);
1224 
1225 	return 0;
1226 }
1227 
/*
 * Low level helpers
 */

/* .enable op for clocks with no hardware gating control: always succeeds */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

/* .disable op for clocks with no hardware gating control: no-op */
static void clkll_disable_null(struct clk *clk)
{
}

/* clkops for clocks that cannot be gated in hardware */
const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};
1244 
/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others.
 * Uses clkops_null, so enabling/disabling it touches no hardware.
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};
1254 
/*
 * Optional facilities: unused-clock shutdown and debugfs clock tree dump
 */
1258 
1259 #ifdef CONFIG_OMAP_RESET_CLOCKS
1260 /*
1261  * Disable any unused clocks left on by the bootloader
1262  */
1263 static int __init clk_disable_unused(void)
1264 {
1265 	struct clk *ck;
1266 	unsigned long flags;
1267 
1268 	pr_info("clock: disabling unused clocks to save power\n");
1269 
1270 	spin_lock_irqsave(&clockfw_lock, flags);
1271 	list_for_each_entry(ck, &clocks, node) {
1272 		if (ck->ops == &clkops_null)
1273 			continue;
1274 
1275 		if (ck->usecount > 0 || !ck->enable_reg)
1276 			continue;
1277 
1278 		omap2_clk_disable_unused(ck);
1279 	}
1280 	spin_unlock_irqrestore(&clockfw_lock, flags);
1281 
1282 	return 0;
1283 }
/* Run late so clocks grabbed by drivers during init are already in use */
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif
1287 
1288 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
1289 /*
1290  *	debugfs support to trace clock tree hierarchy and attributes
1291  */
1292 
1293 #include <linux/debugfs.h>
1294 #include <linux/seq_file.h>
1295 
/* Top-level "clock" directory of the debugfs clock tree */
static struct dentry *clk_debugfs_root;
1297 
1298 static int clk_dbg_show_summary(struct seq_file *s, void *unused)
1299 {
1300 	struct clk *c;
1301 	struct clk *pa;
1302 
1303 	mutex_lock(&clocks_mutex);
1304 	seq_printf(s, "%-30s %-30s %-10s %s\n",
1305 		   "clock-name", "parent-name", "rate", "use-count");
1306 
1307 	list_for_each_entry(c, &clocks, node) {
1308 		pa = c->parent;
1309 		seq_printf(s, "%-30s %-30s %-10lu %d\n",
1310 			   c->name, pa ? pa->name : "none", c->rate,
1311 			   c->usecount);
1312 	}
1313 	mutex_unlock(&clocks_mutex);
1314 
1315 	return 0;
1316 }
1317 
/* debugfs open: bind the summary show routine to the seq_file */
static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}
1322 
/* File operations for the debugfs "summary" file */
static const struct file_operations debug_clock_fops = {
	.open           = clk_dbg_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1329 
1330 static int clk_debugfs_register_one(struct clk *c)
1331 {
1332 	int err;
1333 	struct dentry *d;
1334 	struct clk *pa = c->parent;
1335 
1336 	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
1337 	if (!d)
1338 		return -ENOMEM;
1339 	c->dent = d;
1340 
1341 	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
1342 	if (!d) {
1343 		err = -ENOMEM;
1344 		goto err_out;
1345 	}
1346 	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
1347 	if (!d) {
1348 		err = -ENOMEM;
1349 		goto err_out;
1350 	}
1351 	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
1352 	if (!d) {
1353 		err = -ENOMEM;
1354 		goto err_out;
1355 	}
1356 	return 0;
1357 
1358 err_out:
1359 	debugfs_remove_recursive(c->dent);
1360 	return err;
1361 }
1362 
1363 static int clk_debugfs_register(struct clk *c)
1364 {
1365 	int err;
1366 	struct clk *pa = c->parent;
1367 
1368 	if (pa && !pa->dent) {
1369 		err = clk_debugfs_register(pa);
1370 		if (err)
1371 			return err;
1372 	}
1373 
1374 	if (!c->dent) {
1375 		err = clk_debugfs_register_one(c);
1376 		if (err)
1377 			return err;
1378 	}
1379 	return 0;
1380 }
1381 
1382 static int __init clk_debugfs_init(void)
1383 {
1384 	struct clk *c;
1385 	struct dentry *d;
1386 	int err;
1387 
1388 	d = debugfs_create_dir("clock", NULL);
1389 	if (!d)
1390 		return -ENOMEM;
1391 	clk_debugfs_root = d;
1392 
1393 	list_for_each_entry(c, &clocks, node) {
1394 		err = clk_debugfs_register(c);
1395 		if (err)
1396 			goto err_out;
1397 	}
1398 
1399 	d = debugfs_create_file("summary", S_IRUGO,
1400 		d, NULL, &debug_clock_fops);
1401 	if (!d)
1402 		return -ENOMEM;
1403 
1404 	return 0;
1405 err_out:
1406 	debugfs_remove_recursive(clk_debugfs_root);
1407 	return err;
1408 }
1409 late_initcall(clk_debugfs_init);
1410 
1411 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
1412 #endif /* CONFIG_COMMON_CLK */
1413