/*
 *  linux/arch/arm/mach-omap2/clock.c
 *
 *  Copyright (C) 2005-2008 Texas Instruments, Inc.
 *  Copyright (C) 2004-2010 Nokia Corporation
 *
 *  Contacts:
 *  Richard Woodruff <r-woodruff2@ti.com>
 *  Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#ifdef CONFIG_COMMON_CLK
#include <linux/clk-provider.h>
#else
#include <linux/clk.h>
#endif
#include <linux/io.h>
#include <linux/bitops.h>

#include <asm/cpu.h>

#include <trace/events/power.h>

#include "soc.h"
#include "clockdomain.h"
#include "clock.h"
#include "cm.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
#include "common.h"

/*
 * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
 * for a module to indicate that it is no longer in idle
 */
#define MAX_MODULE_ENABLE_WAIT		100000

u16 cpu_mask;

/*
 * clkdm_control: if true, then when a clock is enabled in the
 * hardware, its clockdomain will first be enabled; and when a clock
 * is disabled in the hardware, its clockdomain will be disabled
 * afterwards.
 */
static bool clkdm_control = true;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
#ifndef CONFIG_COMMON_CLK
static DEFINE_SPINLOCK(clockfw_lock);
#endif

#ifdef CONFIG_COMMON_CLK

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_hw_omap *oclk;

	if (!hw) {
		pr_warn("%s: hw is NULL\n", __func__);
		return -EINVAL;
	}

	oclk = to_clk_hw_omap(hw);

	WARN_ON(!oclk->fixed_div);

	return parent_rate / oclk->fixed_div;
}
#endif
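
/*
 * Illustrative example (added for clarity, not part of the original
 * code): for a clock registered with fixed_div = 2 and a 96 MHz parent,
 * the fixed-divisor recalc above reports 96000000 / 2 = 48000000 Hz
 * (48 MHz).  The 96 MHz figure is an assumed example rate, not a value
 * taken from any particular OMAP clock tree.
 */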

/*
 * OMAP2+ specific clock functions
 */

/* Private functions */

/**
 * _wait_idlest_generic - wait for a module to leave the idle state
 * @reg: virtual address of module IDLEST register
 * @mask: value to mask against to determine if the module is active
 * @idlest: idle state indicator (0 or 1) for the clock
 * @name: name of the clock (for printk)
 *
 * Wait for a module to leave idle, where its idle-status register is
 * not inside the CM module.  Returns 1 if the module left idle
 * promptly, or 0 if the module did not leave idle before the timeout
 * elapsed.  XXX Deprecated - should be moved into drivers for the
 * individual IP block that the IDLEST register exists in.
 */
static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
				const char *name)
{
	int i = 0, ena = 0;

	ena = (idlest) ? 0 : mask;

	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
			  MAX_MODULE_ENABLE_WAIT, i);

	if (i < MAX_MODULE_ENABLE_WAIT)
		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
			 name, i);
	else
		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
		       name, MAX_MODULE_ENABLE_WAIT);

	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
}
131 
132 /**
133  * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
134  * @clk: struct clk * belonging to the module
135  *
136  * If the necessary clocks for the OMAP hardware IP block that
137  * corresponds to clock @clk are enabled, then wait for the module to
138  * indicate readiness (i.e., to leave IDLE).  This code does not
139  * belong in the clock code and will be moved in the medium term to
140  * module-dependent code.  No return value.
141  */
142 #ifdef CONFIG_COMMON_CLK
143 static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
144 #else
145 static void _omap2_module_wait_ready(struct clk *clk)
146 #endif
147 {
148 	void __iomem *companion_reg, *idlest_reg;
149 	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
150 	s16 prcm_mod;
151 	int r;
152 
153 	/* Not all modules have multiple clocks that their IDLEST depends on */
154 	if (clk->ops->find_companion) {
155 		clk->ops->find_companion(clk, &companion_reg, &other_bit);
156 		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
157 			return;
158 	}
159 
160 	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
161 	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
162 	if (r) {
163 		/* IDLEST register not in the CM module */
164 		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
165 #ifdef CONFIG_COMMON_CLK
166 				     __clk_get_name(clk->hw.clk));
167 #else
168 				     clk->name);
169 #endif
170 	} else {
171 		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
172 	};
173 }

/* Public functions */

/**
 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
 * @clk: OMAP clock struct ptr to use
 *
 * Convert a clockdomain name stored in a struct clk 'clk' into a
 * clockdomain pointer, and save it into the struct clk.  Intended to be
 * called during clk_register().  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_init_clk_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
#else
void omap2_init_clk_clkdm(struct clk *clk)
{
#endif
	struct clockdomain *clkdm;
	const char *clk_name;

	if (!clk->clkdm_name)
		return;

#ifdef CONFIG_COMMON_CLK
	clk_name = __clk_get_name(hw->clk);
#else
	clk_name = __clk_get_name(clk);
#endif

	clkdm = clkdm_lookup(clk->clkdm_name);
	if (clkdm) {
		pr_debug("clock: associated clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
		clk->clkdm = clkdm;
	} else {
		pr_debug("clock: could not associate clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
	}
}

/**
 * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
 *
 * Prevent the OMAP clock code from calling into the clockdomain code
 * when a hardware clock in that clockdomain is enabled or disabled.
 * Intended to be called at init time from omap*_clk_init().  No
 * return value.
 */
void __init omap2_clk_disable_clkdm_control(void)
{
	clkdm_control = false;
}

/**
 * omap2_clk_dflt_find_companion - find companion clock to @clk
 * @clk: struct clk * to find the companion clock of
 * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
 * @other_bit: u8 * to return the companion clock bit shift in
 *
 * Note: We don't need special code here for INVERT_ENABLE for the
 * time being, since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL.
 *
 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
 * just a matter of XORing the bits.
 *
 * Some clocks don't have companion clocks.  For example, modules with
 * only an interface clock (such as MAILBOXES) don't have a companion
 * clock.  Right now, this code relies on the hardware exporting a bit
 * in the correct companion register that indicates that the
 * nonexistent 'companion clock' is active.  Future patches will
 * associate this type of code with per-module data structures to
 * avoid this issue, and remove the casts.  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
#else
void omap2_clk_dflt_find_companion(struct clk *clk,
#endif
			void __iomem **other_reg, u8 *other_bit)
{
	u32 r;

	/*
	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
	 * it's just a matter of XORing the bits.
	 */
	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));

	*other_reg = (__force void __iomem *)r;
	*other_bit = clk->enable_bit;
}
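
/*
 * Illustrative example (added for clarity, not part of the original
 * code): assuming the usual OMAP2/3 per-module CM register layout,
 * where CM_FCLKEN1 sits at module offset 0x00 and CM_ICLKEN1 at 0x10,
 * (CM_FCLKEN ^ CM_ICLKEN) evaluates to 0x10, so XORing it into
 * clk->enable_reg toggles a CM_FCLKEN<n> address into the matching
 * CM_ICLKEN<n> address (and vice versa) without any table lookup.
 */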

/**
 * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
 * @clk: struct clk * to find IDLEST info for
 * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
 * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
 * @idlest_val: u8 * to return the idle status indicator
 *
 * Return the CM_IDLEST register address and bit shift corresponding
 * to the module that "owns" this clock.  This default code assumes
 * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
 * the IDLEST register address ID corresponds to the CM_*CLKEN
 * register address ID (e.g., that CM_FCLKEN2 corresponds to
 * CM_IDLEST2).  This is not true for all modules.  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
#else
void omap2_clk_dflt_find_idlest(struct clk *clk,
#endif
		void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
{
	u32 r;

	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
	*idlest_reg = (__force void __iomem *)r;
	*idlest_bit = clk->enable_bit;

	/*
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes.
	 * AM35xx uses both, depending on the module.
	 */
	if (cpu_is_omap24xx())
		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
	else if (cpu_is_omap34xx())
		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
	else
		BUG();
}
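
/*
 * Illustrative example (added for clarity, not part of the original
 * code): with the usual OMAP2/3 per-module CM offsets, CM_FCLKEN2 sits
 * at module offset 0x04 and CM_IDLEST2 at 0x24, so the default mapping
 * above computes (0x04 & ~0xf0) | 0x20 == 0x24; i.e. the CM_*CLKEN
 * register address ID is reused as the CM_IDLEST register address ID,
 * as described in the kerneldoc above.
 */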

#ifdef CONFIG_COMMON_CLK
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  We first call into the OMAP
 * clockdomain code to "enable" the corresponding clockdomain if this
 * is the first enabled user of the clockdomain.  Then program the
 * hardware to enable the clock.  Then wait for the IP block that uses
 * this clock to leave idle (if applicable).  Returns the error value
 * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
 * if @hw has a null clock enable_reg, or zero upon success.
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, ret);
			return ret;
		}
	}

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       __clk_get_name(hw->clk));
		ret = -EINVAL;
		goto err;
	}

	/* FIXME should not have INVERT_ENABLE bit here */
	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}

/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware, and call into the OMAP
 * clockdomain code to "disable" the corresponding clockdomain if all
 * clocks/hwmods in that clockdomain are now disabled.  No return
 * value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (!clk->enable_reg) {
		/*
		 * 'independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}

/**
 * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being enabled
 *
 * Increment the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 1, the clockdomain will be "enabled."
 * Only needed for clocks that don't use omap2_dflt_clk_enable() as
 * their enable function pointer.  Passes along the return value of
 * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
 * clockdomain, or 0 if clock framework-based clockdomain control is
 * not implemented.
 */
int omap2_clkops_enable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return -EINVAL;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return 0;
	}

	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);

	return ret;
}

/**
 * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being disabled
 *
 * Decrement the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 0, the clockdomain will be "disabled."
 * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
 * disable function pointer.  No return value.
 */
void omap2_clkops_disable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	clkdm_clk_disable(clk->clkdm, hw->clk);
}

/**
 * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
 * @hw: struct clk_hw * to check
 *
 * Return 1 if the clock represented by @hw is enabled in the
 * hardware, or 0 otherwise.  Intended for use in the struct
 * clk_ops.is_enabled function pointer.
 */
int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 v;

	v = __raw_readl(clk->enable_reg);

	if (clk->flags & INVERT_ENABLE)
		v ^= BIT(clk->enable_bit);

	v &= BIT(clk->enable_bit);

	return v ? 1 : 0;
}
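
/*
 * Minimal sketch (for illustration only; the real ops structures live
 * in the OMAP clock data files, not here): the callbacks above are
 * meant to be plugged into the common clock framework's struct clk_ops
 * for a gate-style OMAP clock, e.g.:
 *
 *	static const struct clk_ops omap_example_gate_ops = {
 *		.enable		= omap2_dflt_clk_enable,
 *		.disable	= omap2_dflt_clk_disable,
 *		.is_enabled	= omap2_dflt_clk_is_enabled,
 *	};
 *
 * "omap_example_gate_ops" is a made-up name used only for this example.
 */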

static int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);
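
/*
 * Illustrative example (added for clarity): booting with "mpurate=600"
 * on the kernel command line makes omap_clk_setup() scale the value to
 * 600000000 (600 MHz), while "mpurate=600000000" is used as-is; any
 * value of 1000 or above is treated as a rate in Hz.
 */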

const struct clk_hw_omap_ops clkhwops_wait = {
	.find_idlest	= omap2_clk_dflt_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
#else
int omap2_dflt_clk_enable(struct clk *clk)
{
	u32 v;

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	if (clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;
}

void omap2_dflt_clk_disable(struct clk *clk)
{
	u32 v;

	if (!clk->enable_reg) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
		return;
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */
}

const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_companion	= omap2_clk_dflt_find_companion,
	.find_idlest	= omap2_clk_dflt_find_idlest,
};

const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};

/**
 * omap2_clk_disable - disable a clock, if the system is not using it
 * @clk: struct clk * to disable
 *
 * Decrements the usecount on struct clk @clk.  If there are no users
 * left, call the clkops-specific clock disable function to disable it
 * in hardware.  If the clock is part of a clockdomain (which they all
 * should be), request that the clockdomain be disabled.  (It too has
 * a usecount, and so will not be disabled in the hardware until it no
 * longer has any users.)  If the clock has a parent clock (most of
 * them do), then call ourselves, recursing on the parent clock.  This
 * can cause an entire branch of the clock tree to be powered off by
 * simply disabling one clock.  Intended to be called with the clockfw_lock
 * spinlock held.  No return value.
 */
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	clk->usecount--;

	if (clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	if (clk->parent)
		omap2_clk_disable(clk->parent);
}

/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
			     clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}

/* Given a clock and a rate, apply a clock-specific rounding function */
long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);

	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
	if (clk->set_rate) {
		trace_clock_set_rate(clk->name, rate, smp_processor_id());
		ret = clk->set_rate(clk, rate);
	}

	return ret;
}

int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	if (!clk->clksel)
		return -EINVAL;

	if (clk->parent == new_parent)
		return 0;

	return omap2_clksel_set_parent(clk, new_parent);
}

/*
 * OMAP2+ clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else {
		clk->ops->disable(clk);
	}
	if (clk->clkdm != NULL)
		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
#endif

#endif /* CONFIG_COMMON_CLK */

/**
 * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
 * @mpurate_ck_name: clk name of the clock to change rate
 *
 * Change the ARM MPU clock rate to the rate specified on the command
 * line, if one was specified.  @mpurate_ck_name should be
 * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
 * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
 * handled by the virt_prcm_set clock, but this should be handled by
 * the OPP layer.  XXX This is intended to be handled by the OPP layer
 * code in the near future and should be removed from the clock code.
 * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
 * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
 * cannot be found, or 0 upon success.
 */
int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
{
	struct clk *mpurate_ck;
	int r;

	if (!mpurate)
		return -EINVAL;

	mpurate_ck = clk_get(NULL, mpurate_ck_name);
	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
		return -ENOENT;

	r = clk_set_rate(mpurate_ck, mpurate);
	if (IS_ERR_VALUE(r)) {
		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
		     mpurate_ck_name, mpurate, r);
		clk_put(mpurate_ck);
		return -EINVAL;
	}

	calibrate_delay();
#ifndef CONFIG_COMMON_CLK
	recalculate_root_clocks();
#endif

	clk_put(mpurate_ck);

	return 0;
}
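
/*
 * Illustrative usage sketch (hypothetical caller, not code from this
 * file): an SoC clock init function would pass the name documented in
 * the kerneldoc above, e.g.
 *
 *	omap2_clk_switch_mpurate_at_boot("dpll1_ck");
 *
 * on OMAP34xx, after the clock tree has been registered, and only when
 * the user supplied an mpurate= argument.
 */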

/**
 * omap2_clk_print_new_rates - print summary of current clock tree rates
 * @hfclkin_ck_name: clk name for the off-chip HF oscillator
 * @core_ck_name: clk name for the on-chip CORE_CLK
 * @mpu_ck_name: clk name for the ARM MPU clock
 *
 * Prints a short message to the console with the HFCLKIN oscillator
 * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
 * Called by the boot-time MPU rate switching code.  XXX This is intended
 * to be handled by the OPP layer code in the near future and should be
 * removed from the clock code.  No return value.
 */
void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
				      const char *core_ck_name,
				      const char *mpu_ck_name)
{
	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
	unsigned long hfclkin_rate;

	mpu_ck = clk_get(NULL, mpu_ck_name);
	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
		return;

	core_ck = clk_get(NULL, core_ck_name);
	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
		return;

	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
		return;

	hfclkin_rate = clk_get_rate(hfclkin_ck);

	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
		(clk_get_rate(core_ck) / 1000000),
		(clk_get_rate(mpu_ck) / 1000000));
}

#ifndef CONFIG_COMMON_CLK
/* Common data */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap2_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		ret = omap2_clk_set_parent(clk, parent);
		if (ret == 0)
			propagate_rate(clk);
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

/* Used for clocks that always have same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}
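
/*
 * Illustrative usage sketch (hypothetical caller, not code from this
 * file): board or PM code that needs a clock by name rather than via
 * clk_get() could do
 *
 *	struct clk *sys_ck = omap_clk_get_by_name("sys_ck");
 *
 *	if (!sys_ck)
 *		pr_warn("sys_ck not registered yet\n");
 *
 * "sys_ck" here is just an example clock name.
 */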

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap2_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open           = clk_dbg_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
		d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
#endif /* CONFIG_COMMON_CLK */