xref: /openbmc/linux/arch/arm/mach-omap2/clock.c (revision b99db36c)
1 /*
2  *  linux/arch/arm/mach-omap2/clock.c
3  *
4  *  Copyright (C) 2005-2008 Texas Instruments, Inc.
5  *  Copyright (C) 2004-2010 Nokia Corporation
6  *
7  *  Contacts:
8  *  Richard Woodruff <r-woodruff2@ti.com>
9  *  Paul Walmsley
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  */
15 #undef DEBUG
16 
17 #include <linux/kernel.h>
18 #include <linux/export.h>
19 #include <linux/list.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
24 #include <linux/io.h>
25 #include <linux/bitops.h>
26 
27 #include <asm/cpu.h>
28 
29 
30 #include <trace/events/power.h>
31 
32 #include "soc.h"
33 #include "clockdomain.h"
34 #include "clock.h"
35 #include "cm.h"
36 #include "cm2xxx.h"
37 #include "cm3xxx.h"
38 #include "cm-regbits-24xx.h"
39 #include "cm-regbits-34xx.h"
40 #include "common.h"
41 
42 /*
43  * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
44  * for a module to indicate that it is no longer in idle.
45  */
46 #define MAX_MODULE_ENABLE_WAIT		100000
47 
48 u16 cpu_mask;
49 
50 /*
51  * clkdm_control: if true, then when a clock is enabled in the
52  * hardware, its clockdomain will first be enabled; and when a clock
53  * is disabled in the hardware, its clockdomain will be disabled
54  * afterwards.
55  */
56 static bool clkdm_control = true;
57 
58 static LIST_HEAD(clocks);
59 static DEFINE_MUTEX(clocks_mutex);
60 static DEFINE_SPINLOCK(clockfw_lock);
61 
62 /*
63  * OMAP2+ specific clock functions
64  */
65 
66 /* Private functions */
67 
68 
69 /**
70  * _wait_idlest_generic - wait for a module to leave the idle state
71  * @reg: virtual address of module IDLEST register
72  * @mask: value to mask against to determine if the module is active
73  * @idlest: idle state indicator (0 or 1) for the clock
74  * @name: name of the clock (for printk)
75  *
76  * Wait for a module to leave idle, where its idle-status register is
77  * not inside the CM module.  Returns 1 if the module left idle
78  * promptly, or 0 if the module did not leave idle before the timeout
79  * elapsed.  XXX Deprecated - should be moved into drivers for the
80  * individual IP block that the IDLEST register exists in.
81  */
82 static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
83 				const char *name)
84 {
85 	int i = 0, ena = 0;
86 
87 	ena = (idlest) ? 0 : mask;
88 
89 	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
90 			  MAX_MODULE_ENABLE_WAIT, i);
91 
92 	if (i < MAX_MODULE_ENABLE_WAIT)
93 		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
94 			 name, i);
95 	else
96 		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
97 		       name, MAX_MODULE_ENABLE_WAIT);
98 
99 	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
100 }
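
/*
 * Note on the wait above: the loop index is really a retry count.  It
 * only approximates elapsed microseconds to the extent that
 * omap_test_timeout() delays for roughly one microsecond between polls
 * (an assumption, not something guaranteed by this file), which is why
 * the messages report loop/try counts rather than a time.
 */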
101 
102 /**
103  * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
104  * @clk: struct clk * belonging to the module
105  *
106  * If the necessary clocks for the OMAP hardware IP block that
107  * corresponds to clock @clk are enabled, then wait for the module to
108  * indicate readiness (i.e., to leave IDLE).  This code does not
109  * belong in the clock code and will be moved in the medium term to
110  * module-dependent code.  No return value.
111  */
112 static void _omap2_module_wait_ready(struct clk *clk)
113 {
114 	void __iomem *companion_reg, *idlest_reg;
115 	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
116 	s16 prcm_mod;
117 	int r;
118 
119 	/* Not all modules have multiple clocks that their IDLEST depends on */
120 	if (clk->ops->find_companion) {
121 		clk->ops->find_companion(clk, &companion_reg, &other_bit);
122 		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
123 			return;
124 	}
125 
126 	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
127 
128 	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
129 	if (r) {
130 		/* IDLEST register not in the CM module */
131 		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
132 				     clk->name);
133 	} else {
134 		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
135 	}
136 }
137 
138 /* Public functions */
139 
140 /**
141  * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
142  * @clk: OMAP clock struct ptr to use
143  *
144  * Convert a clockdomain name stored in a struct clk 'clk' into a
145  * clockdomain pointer, and save it into the struct clk.  Intended to be
146  * called during clk_register().  No return value.
147  */
148 void omap2_init_clk_clkdm(struct clk *clk)
149 {
150 	struct clockdomain *clkdm;
151 	const char *clk_name;
152 
153 	if (!clk->clkdm_name)
154 		return;
155 
156 	clk_name = __clk_get_name(clk);
157 
158 	clkdm = clkdm_lookup(clk->clkdm_name);
159 	if (clkdm) {
160 		pr_debug("clock: associated clk %s to clkdm %s\n",
161 			 clk_name, clk->clkdm_name);
162 		clk->clkdm = clkdm;
163 	} else {
164 		pr_debug("clock: could not associate clk %s to clkdm %s\n",
165 			 clk_name, clk->clkdm_name);
166 	}
167 }
168 
169 /**
170  * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
171  *
172  * Prevent the OMAP clock code from calling into the clockdomain code
173  * when a hardware clock in that clockdomain is enabled or disabled.
174  * Intended to be called at init time from omap*_clk_init().  No
175  * return value.
176  */
177 void __init omap2_clk_disable_clkdm_control(void)
178 {
179 	clkdm_control = false;
180 }
181 
182 /**
183  * omap2_clk_dflt_find_companion - find companion clock to @clk
184  * @clk: struct clk * to find the companion clock of
185  * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
186  * @other_bit: u8 * to return the companion clock bit shift in
187  *
188  * Note: We don't need special code here for INVERT_ENABLE for the
189  * time being since INVERT_ENABLE only applies to clocks enabled by
190  * CM_CLKEN_PLL.
191  *
192  * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
193  * just a matter of XORing the bits.
194  *
195  * Some clocks don't have companion clocks.  For example, modules with
196  * only an interface clock (such as MAILBOXES) don't have a companion
197  * clock.  Right now, this code relies on the hardware exporting a bit
198  * in the correct companion register that indicates that the
199  * nonexistent 'companion clock' is active.  Future patches will
200  * associate this type of code with per-module data structures to
201  * avoid this issue, and remove the casts.  No return value.
202  */
203 void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
204 				   u8 *other_bit)
205 {
206 	u32 r;
207 
208 	/*
209 	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
210 	 * it's just a matter of XORing the bits.
211 	 */
212 	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
213 
214 	*other_reg = (__force void __iomem *)r;
215 	*other_bit = clk->enable_bit;
216 }
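
/*
 * Worked example of the conversion above (illustrative only; it
 * assumes the usual OMAP2/3 CM layout in which CM_FCLKEN sits at
 * module offset 0x00 and CM_ICLKEN at offset 0x10): for a functional
 * clock whose enable_reg points at a module's CM_FCLKEN1 register,
 * CM_FCLKEN ^ CM_ICLKEN evaluates to 0x10, so XORing the address with
 * it yields that module's CM_ICLKEN1; applying the same XOR to an
 * ICLKEN address gives back the FCLKEN address.
 */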
217 
218 /**
219  * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
220  * @clk: struct clk * to find IDLEST info for
221  * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
222  * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
223  * @idlest_val: u8 * to return the idle status indicator
224  *
225  * Return the CM_IDLEST register address and bit shift corresponding
226  * to the module that "owns" this clock.  This default code assumes
227  * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
228  * the IDLEST register address ID corresponds to the CM_*CLKEN
229  * register address ID (e.g., that CM_FCLKEN2 corresponds to
230  * CM_IDLEST2).  This is not true for all modules.  No return value.
231  */
232 void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
233 				u8 *idlest_bit, u8 *idlest_val)
234 {
235 	u32 r;
236 
237 	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
238 	*idlest_reg = (__force void __iomem *)r;
239 	*idlest_bit = clk->enable_bit;
240 
241 	/*
242 	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
243 	 * 34xx reverses this, just to keep us on our toes.
244 	 * AM35xx uses both, depending on the module.
245 	 */
246 	if (cpu_is_omap24xx())
247 		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
248 	else if (cpu_is_omap34xx())
249 		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
250 	else
251 		BUG();
252 
253 }
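
/*
 * Worked example of the address arithmetic above (illustrative only;
 * it assumes the usual CM layout in which CM_FCLKENx/CM_ICLKENx sit in
 * the 0x00-0x1f range and CM_IDLESTx starts at offset 0x20): for an
 * interface clock enabled via CM_ICLKEN2 at module offset 0x14,
 * (0x14 & ~0xf0) keeps the register ID nibble, giving 0x04, and OR-ing
 * in 0x20 yields 0x24, i.e. that module's CM_IDLEST2, matching the
 * CM_FCLKEN2 <-> CM_IDLEST2 correspondence described above.
 */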
254 
255 int omap2_dflt_clk_enable(struct clk *clk)
256 {
257 	u32 v;
258 
259 	if (unlikely(clk->enable_reg == NULL)) {
260 		pr_err("clock.c: Enable for %s without enable code\n",
261 		       clk->name);
262 		return 0; /* REVISIT: -EINVAL */
263 	}
264 
265 	v = __raw_readl(clk->enable_reg);
266 	if (clk->flags & INVERT_ENABLE)
267 		v &= ~(1 << clk->enable_bit);
268 	else
269 		v |= (1 << clk->enable_bit);
270 	__raw_writel(v, clk->enable_reg);
271 	v = __raw_readl(clk->enable_reg); /* OCP barrier */
272 
273 	if (clk->ops->find_idlest)
274 		_omap2_module_wait_ready(clk);
275 
276 	return 0;
277 }
278 
279 void omap2_dflt_clk_disable(struct clk *clk)
280 {
281 	u32 v;
282 
283 	if (!clk->enable_reg) {
284 		/*
285 		 * 'Independent' here refers to a clock which is not
286 		 * controlled by its parent.
287 		 */
288 		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
289 		return;
290 	}
291 
292 	v = __raw_readl(clk->enable_reg);
293 	if (clk->flags & INVERT_ENABLE)
294 		v |= (1 << clk->enable_bit);
295 	else
296 		v &= ~(1 << clk->enable_bit);
297 	__raw_writel(v, clk->enable_reg);
298 	/* No OCP barrier needed here since it is a disable operation */
299 }
300 
301 const struct clkops clkops_omap2_dflt_wait = {
302 	.enable		= omap2_dflt_clk_enable,
303 	.disable	= omap2_dflt_clk_disable,
304 	.find_companion	= omap2_clk_dflt_find_companion,
305 	.find_idlest	= omap2_clk_dflt_find_idlest,
306 };
307 
308 const struct clkops clkops_omap2_dflt = {
309 	.enable		= omap2_dflt_clk_enable,
310 	.disable	= omap2_dflt_clk_disable,
311 };
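
/*
 * Sketch of how a clock might select the default clkops defined above.
 * This is purely illustrative and compiled out: the clock name, parent,
 * clockdomain name, register macro and bit number are hypothetical and
 * are not taken from any real OMAP clock data.
 */
#if 0
static struct clk example_fck = {
	.name		= "example_fck",		/* hypothetical */
	.ops		= &clkops_omap2_dflt_wait,
	.parent		= &example_parent_ck,		/* hypothetical */
	.clkdm_name	= "example_clkdm",		/* hypothetical */
	.enable_reg	= EXAMPLE_CM_FCLKEN_REG,	/* hypothetical */
	.enable_bit	= 3,
	.recalc		= &followparent_recalc,
};
#endif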
312 
313 /**
314  * omap2_clk_disable - disable a clock, if the system is not using it
315  * @clk: struct clk * to disable
316  *
317  * Decrements the usecount on struct clk @clk.  If there are no users
318  * left, call the clkops-specific clock disable function to disable it
319  * in hardware.  If the clock is part of a clockdomain (which they all
320  * should be), request that the clockdomain be disabled.  (It too has
321  * a usecount, and so will not be disabled in the hardware until it no
322  * longer has any users.)  If the clock has a parent clock (most of
323  * them do), then call ourselves, recursing on the parent clock.  This
324  * can cause an entire branch of the clock tree to be powered off by
325  * simply disabling one clock.  Intended to be called with the clockfw_lock
326  * spinlock held.  No return value.
327  */
328 void omap2_clk_disable(struct clk *clk)
329 {
330 	if (clk->usecount == 0) {
331 		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
332 		return;
333 	}
334 
335 	pr_debug("clock: %s: decrementing usecount\n", clk->name);
336 
337 	clk->usecount--;
338 
339 	if (clk->usecount > 0)
340 		return;
341 
342 	pr_debug("clock: %s: disabling in hardware\n", clk->name);
343 
344 	if (clk->ops && clk->ops->disable) {
345 		trace_clock_disable(clk->name, 0, smp_processor_id());
346 		clk->ops->disable(clk);
347 	}
348 
349 	if (clkdm_control && clk->clkdm)
350 		clkdm_clk_disable(clk->clkdm, clk);
351 
352 	if (clk->parent)
353 		omap2_clk_disable(clk->parent);
354 }
355 
356 /**
357  * omap2_clk_enable - request that the system enable a clock
358  * @clk: struct clk * to enable
359  *
360  * Increments the usecount on struct clk @clk.  If there were no users
361  * previously, then recurse up the clock tree, enabling all of the
362  * clock's parents and all of the parent clockdomains, and finally,
363  * enabling @clk's clockdomain, and @clk itself.  Intended to be
364  * called with the clockfw_lock spinlock held.  Returns 0 upon success
365  * or a negative error code upon failure.
366  */
367 int omap2_clk_enable(struct clk *clk)
368 {
369 	int ret;
370 
371 	pr_debug("clock: %s: incrementing usecount\n", clk->name);
372 
373 	clk->usecount++;
374 
375 	if (clk->usecount > 1)
376 		return 0;
377 
378 	pr_debug("clock: %s: enabling in hardware\n", clk->name);
379 
380 	if (clk->parent) {
381 		ret = omap2_clk_enable(clk->parent);
382 		if (ret) {
383 			WARN(1, "clock: %s: could not enable parent %s: %d\n",
384 			     clk->name, clk->parent->name, ret);
385 			goto oce_err1;
386 		}
387 	}
388 
389 	if (clkdm_control && clk->clkdm) {
390 		ret = clkdm_clk_enable(clk->clkdm, clk);
391 		if (ret) {
392 			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
393 			     clk->name, clk->clkdm->name, ret);
394 			goto oce_err2;
395 		}
396 	}
397 
398 	if (clk->ops && clk->ops->enable) {
399 		trace_clock_enable(clk->name, 1, smp_processor_id());
400 		ret = clk->ops->enable(clk);
401 		if (ret) {
402 			WARN(1, "clock: %s: could not enable: %d\n",
403 			     clk->name, ret);
404 			goto oce_err3;
405 		}
406 	}
407 
408 	return 0;
409 
410 oce_err3:
411 	if (clkdm_control && clk->clkdm)
412 		clkdm_clk_disable(clk->clkdm, clk);
413 oce_err2:
414 	if (clk->parent)
415 		omap2_clk_disable(clk->parent);
416 oce_err1:
417 	clk->usecount--;
418 
419 	return ret;
420 }
421 
422 /* Given a clock and a rate, apply a clock-specific rounding function */
423 long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
424 {
425 	if (clk->round_rate)
426 		return clk->round_rate(clk, rate);
427 
428 	return clk->rate;
429 }
430 
431 /* Set the clock rate for a clock source */
432 int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
433 {
434 	int ret = -EINVAL;
435 
436 	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
437 
438 	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
439 	if (clk->set_rate) {
440 		trace_clock_set_rate(clk->name, rate, smp_processor_id());
441 		ret = clk->set_rate(clk, rate);
442 	}
443 
444 	return ret;
445 }
446 
447 int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
448 {
449 	if (!clk->clksel)
450 		return -EINVAL;
451 
452 	if (clk->parent == new_parent)
453 		return 0;
454 
455 	return omap2_clksel_set_parent(clk, new_parent);
456 }
457 
458 /*
459  * OMAP2+ clock reset and init functions
460  */
461 
462 #ifdef CONFIG_OMAP_RESET_CLOCKS
463 void omap2_clk_disable_unused(struct clk *clk)
464 {
465 	u32 regval32, v;
466 
467 	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;
468 
469 	regval32 = __raw_readl(clk->enable_reg);
470 	if ((regval32 & (1 << clk->enable_bit)) == v)
471 		return;
472 
473 	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
474 	if (cpu_is_omap34xx()) {
475 		omap2_clk_enable(clk);
476 		omap2_clk_disable(clk);
477 	} else {
478 		clk->ops->disable(clk);
479 	}
480 	if (clk->clkdm != NULL)
481 		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
482 }
483 #endif
484 
485 /**
486  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
487  * @mpurate_ck_name: clk name of the clock to change rate
488  *
489  * Change the ARM MPU clock rate to the rate specified on the command
490  * line, if one was specified.  @mpurate_ck_name should be
491  * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
492  * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
493  * handled by the virt_prcm_set clock, but this should be handled by
494  * the OPP layer.  XXX This is intended to be handled by the OPP layer
495  * code in the near future and should be removed from the clock code.
496  * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
497  * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
498  * cannot be found, or 0 upon success.
499  */
500 int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
501 {
502 	struct clk *mpurate_ck;
503 	int r;
504 
505 	if (!mpurate)
506 		return -EINVAL;
507 
508 	mpurate_ck = clk_get(NULL, mpurate_ck_name);
509 	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
510 		return -ENOENT;
511 
512 	r = clk_set_rate(mpurate_ck, mpurate);
513 	if (IS_ERR_VALUE(r)) {
514 		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
515 		     mpurate_ck->name, mpurate, r);
516 		clk_put(mpurate_ck);
517 		return -EINVAL;
518 	}
519 
520 	calibrate_delay();
521 	recalculate_root_clocks();
522 
523 	clk_put(mpurate_ck);
524 
525 	return 0;
526 }
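
/*
 * Typical boot-time use, following the doc comment above (sketch only;
 * the exact call site depends on the SoC init code):
 *
 *	omap2_clk_switch_mpurate_at_boot("virt_prcm_set");	OMAP2xxx
 *	omap2_clk_switch_mpurate_at_boot("dpll1_ck");		OMAP34xx/36xx
 */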
527 
528 /**
529  * omap2_clk_print_new_rates - print summary of current clock tree rates
530  * @hfclkin_ck_name: clk name for the off-chip HF oscillator
531  * @core_ck_name: clk name for the on-chip CORE_CLK
532  * @mpu_ck_name: clk name for the ARM MPU clock
533  *
534  * Prints a short message to the console with the HFCLKIN oscillator
535  * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
536  * Called by the boot-time MPU rate switching code.   XXX This is intended
537  * to be handled by the OPP layer code in the near future and should be
538  * removed from the clock code.  No return value.
539  */
540 void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
541 				      const char *core_ck_name,
542 				      const char *mpu_ck_name)
543 {
544 	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
545 	unsigned long hfclkin_rate;
546 
547 	mpu_ck = clk_get(NULL, mpu_ck_name);
548 	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
549 		return;
550 
551 	core_ck = clk_get(NULL, core_ck_name);
552 	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
553 		return;
554 
555 	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
556 	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
557 		return;
558 
559 	hfclkin_rate = clk_get_rate(hfclkin_ck);
560 
561 	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
562 		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
563 		(clk_get_rate(core_ck) / 1000000),
564 		(clk_get_rate(mpu_ck) / 1000000));
565 }
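
/*
 * Example of the resulting console line (rates are illustrative only):
 *
 *	Switched to new clocking rate (Crystal/Core/MPU): 26.0/332/600 MHz
 */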
566 
567 /* Common data */
568 
569 int clk_enable(struct clk *clk)
570 {
571 	unsigned long flags;
572 	int ret;
573 
574 	if (clk == NULL || IS_ERR(clk))
575 		return -EINVAL;
576 
577 	spin_lock_irqsave(&clockfw_lock, flags);
578 	ret = omap2_clk_enable(clk);
579 	spin_unlock_irqrestore(&clockfw_lock, flags);
580 
581 	return ret;
582 }
583 EXPORT_SYMBOL(clk_enable);
584 
585 void clk_disable(struct clk *clk)
586 {
587 	unsigned long flags;
588 
589 	if (clk == NULL || IS_ERR(clk))
590 		return;
591 
592 	spin_lock_irqsave(&clockfw_lock, flags);
593 	if (clk->usecount == 0) {
594 		pr_err("Trying to disable clock %s with 0 usecount\n",
595 		       clk->name);
596 		WARN_ON(1);
597 		goto out;
598 	}
599 
600 	omap2_clk_disable(clk);
601 
602 out:
603 	spin_unlock_irqrestore(&clockfw_lock, flags);
604 }
605 EXPORT_SYMBOL(clk_disable);
606 
607 unsigned long clk_get_rate(struct clk *clk)
608 {
609 	unsigned long flags;
610 	unsigned long ret;
611 
612 	if (clk == NULL || IS_ERR(clk))
613 		return 0;
614 
615 	spin_lock_irqsave(&clockfw_lock, flags);
616 	ret = clk->rate;
617 	spin_unlock_irqrestore(&clockfw_lock, flags);
618 
619 	return ret;
620 }
621 EXPORT_SYMBOL(clk_get_rate);
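
/*
 * Typical consumer usage of the clk API implemented above (sketch;
 * "dev" and the clock name are hypothetical):
 *
 *	struct clk *fck = clk_get(dev, "example_fck");
 *
 *	if (!IS_ERR(fck)) {
 *		clk_enable(fck);
 *		pr_info("clock runs at %lu Hz\n", clk_get_rate(fck));
 *		clk_disable(fck);
 *		clk_put(fck);
 *	}
 */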
622 
623 /*
624  * Optional clock functions defined in include/linux/clk.h
625  */
626 
627 long clk_round_rate(struct clk *clk, unsigned long rate)
628 {
629 	unsigned long flags;
630 	long ret;
631 
632 	if (clk == NULL || IS_ERR(clk))
633 		return 0;
634 
635 	spin_lock_irqsave(&clockfw_lock, flags);
636 	ret = omap2_clk_round_rate(clk, rate);
637 	spin_unlock_irqrestore(&clockfw_lock, flags);
638 
639 	return ret;
640 }
641 EXPORT_SYMBOL(clk_round_rate);
642 
643 int clk_set_rate(struct clk *clk, unsigned long rate)
644 {
645 	unsigned long flags;
646 	int ret = -EINVAL;
647 
648 	if (clk == NULL || IS_ERR(clk))
649 		return ret;
650 
651 	spin_lock_irqsave(&clockfw_lock, flags);
652 	ret = omap2_clk_set_rate(clk, rate);
653 	if (ret == 0)
654 		propagate_rate(clk);
655 	spin_unlock_irqrestore(&clockfw_lock, flags);
656 
657 	return ret;
658 }
659 EXPORT_SYMBOL(clk_set_rate);
660 
661 int clk_set_parent(struct clk *clk, struct clk *parent)
662 {
663 	unsigned long flags;
664 	int ret = -EINVAL;
665 
666 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
667 		return ret;
668 
669 	spin_lock_irqsave(&clockfw_lock, flags);
670 	if (clk->usecount == 0) {
671 		ret = omap2_clk_set_parent(clk, parent);
672 		if (ret == 0)
673 			propagate_rate(clk);
674 	} else {
675 		ret = -EBUSY;
676 	}
677 	spin_unlock_irqrestore(&clockfw_lock, flags);
678 
679 	return ret;
680 }
681 EXPORT_SYMBOL(clk_set_parent);
682 
683 struct clk *clk_get_parent(struct clk *clk)
684 {
685 	return clk->parent;
686 }
687 EXPORT_SYMBOL(clk_get_parent);
688 
689 /*
690  * OMAP specific clock functions shared between omap1 and omap2
691  */
692 
693 int __initdata mpurate;
694 
695 /*
696  * By default we use the rate set by the bootloader.
697  * You can override this with the mpurate= cmdline option.
698  */
699 static int __init omap_clk_setup(char *str)
700 {
701 	get_option(&str, &mpurate);
702 
703 	if (!mpurate)
704 		return 1;
705 
706 	if (mpurate < 1000)
707 		mpurate *= 1000000;
708 
709 	return 1;
710 }
711 __setup("mpurate=", omap_clk_setup);
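
/*
 * Examples of how the option above is interpreted (values are
 * illustrative):
 *
 *	mpurate=600        -> below 1000, so treated as MHz and scaled
 *	                      to 600000000 Hz
 *	mpurate=600000000  -> taken as a rate in Hz, used unchanged
 */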
712 
713 /* Used for clocks that always have the same value as the parent clock */
714 unsigned long followparent_recalc(struct clk *clk)
715 {
716 	return clk->parent->rate;
717 }
718 
719 /*
720  * Used for clocks that have the same value as the parent clock,
721  * divided by some factor
722  */
723 unsigned long omap_fixed_divisor_recalc(struct clk *clk)
724 {
725 	WARN_ON(!clk->fixed_div);
726 
727 	return clk->parent->rate / clk->fixed_div;
728 }
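
/*
 * Example: a clock with .fixed_div = 2 whose parent runs at 332 MHz
 * recalculates to 166 MHz (rates are illustrative only).
 */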
729 
730 void clk_reparent(struct clk *child, struct clk *parent)
731 {
732 	list_del_init(&child->sibling);
733 	if (parent)
734 		list_add(&child->sibling, &parent->children);
735 	child->parent = parent;
736 
737 	/* XXX The debugfs entry should also be reattached under the new
738 	   parent here; that renaming is not done in this function. */
739 }
740 
741 /* Propagate rate to children */
742 void propagate_rate(struct clk *tclk)
743 {
744 	struct clk *clkp;
745 
746 	list_for_each_entry(clkp, &tclk->children, sibling) {
747 		if (clkp->recalc)
748 			clkp->rate = clkp->recalc(clkp);
749 		propagate_rate(clkp);
750 	}
751 }
752 
753 static LIST_HEAD(root_clks);
754 
755 /**
756  * recalculate_root_clocks - recalculate and propagate all root clocks
757  *
758  * Recalculates all root clocks (clocks with no parent), which if the
759  * clock's .recalc is set correctly, should also propagate their rates.
760  * Called at init.
761  */
762 void recalculate_root_clocks(void)
763 {
764 	struct clk *clkp;
765 
766 	list_for_each_entry(clkp, &root_clks, sibling) {
767 		if (clkp->recalc)
768 			clkp->rate = clkp->recalc(clkp);
769 		propagate_rate(clkp);
770 	}
771 }
772 
773 /**
774  * clk_preinit - initialize any fields in the struct clk before clk init
775  * @clk: struct clk * to initialize
776  *
777  * Initialize any struct clk fields needed before normal clk initialization
778  * can run.  No return value.
779  */
780 void clk_preinit(struct clk *clk)
781 {
782 	INIT_LIST_HEAD(&clk->children);
783 }
784 
785 int clk_register(struct clk *clk)
786 {
787 	if (clk == NULL || IS_ERR(clk))
788 		return -EINVAL;
789 
790 	/*
791 	 * trap out already registered clocks
792 	 */
793 	if (clk->node.next || clk->node.prev)
794 		return 0;
795 
796 	mutex_lock(&clocks_mutex);
797 	if (clk->parent)
798 		list_add(&clk->sibling, &clk->parent->children);
799 	else
800 		list_add(&clk->sibling, &root_clks);
801 
802 	list_add(&clk->node, &clocks);
803 	if (clk->init)
804 		clk->init(clk);
805 	mutex_unlock(&clocks_mutex);
806 
807 	return 0;
808 }
809 EXPORT_SYMBOL(clk_register);
810 
811 void clk_unregister(struct clk *clk)
812 {
813 	if (clk == NULL || IS_ERR(clk))
814 		return;
815 
816 	mutex_lock(&clocks_mutex);
817 	list_del(&clk->sibling);
818 	list_del(&clk->node);
819 	mutex_unlock(&clocks_mutex);
820 }
821 EXPORT_SYMBOL(clk_unregister);
822 
823 void clk_enable_init_clocks(void)
824 {
825 	struct clk *clkp;
826 
827 	list_for_each_entry(clkp, &clocks, node)
828 		if (clkp->flags & ENABLE_ON_INIT)
829 			clk_enable(clkp);
830 }
831 
832 /**
833  * omap_clk_get_by_name - locate OMAP struct clk by its name
834  * @name: name of the struct clk to locate
835  *
836  * Locate an OMAP struct clk by its name.  Assumes that struct clk
837  * names are unique.  Returns NULL if not found or a pointer to the
838  * struct clk if found.
839  */
840 struct clk *omap_clk_get_by_name(const char *name)
841 {
842 	struct clk *c;
843 	struct clk *ret = NULL;
844 
845 	mutex_lock(&clocks_mutex);
846 
847 	list_for_each_entry(c, &clocks, node) {
848 		if (!strcmp(c->name, name)) {
849 			ret = c;
850 			break;
851 		}
852 	}
853 
854 	mutex_unlock(&clocks_mutex);
855 
856 	return ret;
857 }
858 
859 int omap_clk_enable_autoidle_all(void)
860 {
861 	struct clk *c;
862 	unsigned long flags;
863 
864 	spin_lock_irqsave(&clockfw_lock, flags);
865 
866 	list_for_each_entry(c, &clocks, node)
867 		if (c->ops->allow_idle)
868 			c->ops->allow_idle(c);
869 
870 	spin_unlock_irqrestore(&clockfw_lock, flags);
871 
872 	return 0;
873 }
874 
875 int omap_clk_disable_autoidle_all(void)
876 {
877 	struct clk *c;
878 	unsigned long flags;
879 
880 	spin_lock_irqsave(&clockfw_lock, flags);
881 
882 	list_for_each_entry(c, &clocks, node)
883 		if (c->ops->deny_idle)
884 			c->ops->deny_idle(c);
885 
886 	spin_unlock_irqrestore(&clockfw_lock, flags);
887 
888 	return 0;
889 }
890 
891 /*
892  * Low level helpers
893  */
894 static int clkll_enable_null(struct clk *clk)
895 {
896 	return 0;
897 }
898 
899 static void clkll_disable_null(struct clk *clk)
900 {
901 }
902 
903 const struct clkops clkops_null = {
904 	.enable		= clkll_enable_null,
905 	.disable	= clkll_disable_null,
906 };
907 
908 /*
909  * Dummy clock
910  *
911  * Used for clock aliases that are needed on some OMAPs, but not others
912  */
913 struct clk dummy_ck = {
914 	.name	= "dummy",
915 	.ops	= &clkops_null,
916 };
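
/*
 * Example use (hypothetical device and clock names): a platform that
 * lacks a real "foo_ick" can still satisfy drivers that call
 * clk_get(dev, "foo_ick") by registering a clkdev lookup such as
 * CLKDEV_INIT("foo.0", "foo_ick", &dummy_ck) in its clock data.
 */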
917 
922 #ifdef CONFIG_OMAP_RESET_CLOCKS
923 /*
924  * Disable any unused clocks left on by the bootloader
925  */
926 static int __init clk_disable_unused(void)
927 {
928 	struct clk *ck;
929 	unsigned long flags;
930 
931 	pr_info("clock: disabling unused clocks to save power\n");
932 
933 	spin_lock_irqsave(&clockfw_lock, flags);
934 	list_for_each_entry(ck, &clocks, node) {
935 		if (ck->ops == &clkops_null)
936 			continue;
937 
938 		if (ck->usecount > 0 || !ck->enable_reg)
939 			continue;
940 
941 		omap2_clk_disable_unused(ck);
942 	}
943 	spin_unlock_irqrestore(&clockfw_lock, flags);
944 
945 	return 0;
946 }
947 late_initcall(clk_disable_unused);
948 late_initcall(omap_clk_enable_autoidle_all);
949 #endif
950 
951 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
952 /*
953  *	debugfs support to trace clock tree hierarchy and attributes
954  */
955 
956 #include <linux/debugfs.h>
957 #include <linux/seq_file.h>
958 
959 static struct dentry *clk_debugfs_root;
960 
961 static int clk_dbg_show_summary(struct seq_file *s, void *unused)
962 {
963 	struct clk *c;
964 	struct clk *pa;
965 
966 	mutex_lock(&clocks_mutex);
967 	seq_printf(s, "%-30s %-30s %-10s %s\n",
968 		   "clock-name", "parent-name", "rate", "use-count");
969 
970 	list_for_each_entry(c, &clocks, node) {
971 		pa = c->parent;
972 		seq_printf(s, "%-30s %-30s %-10lu %d\n",
973 			   c->name, pa ? pa->name : "none", c->rate,
974 			   c->usecount);
975 	}
976 	mutex_unlock(&clocks_mutex);
977 
978 	return 0;
979 }
980 
981 static int clk_dbg_open(struct inode *inode, struct file *file)
982 {
983 	return single_open(file, clk_dbg_show_summary, inode->i_private);
984 }
985 
986 static const struct file_operations debug_clock_fops = {
987 	.open           = clk_dbg_open,
988 	.read           = seq_read,
989 	.llseek         = seq_lseek,
990 	.release        = single_release,
991 };
992 
993 static int clk_debugfs_register_one(struct clk *c)
994 {
995 	int err;
996 	struct dentry *d;
997 	struct clk *pa = c->parent;
998 
999 	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
1000 	if (!d)
1001 		return -ENOMEM;
1002 	c->dent = d;
1003 
1004 	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
1005 	if (!d) {
1006 		err = -ENOMEM;
1007 		goto err_out;
1008 	}
1009 	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
1010 	if (!d) {
1011 		err = -ENOMEM;
1012 		goto err_out;
1013 	}
1014 	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
1015 	if (!d) {
1016 		err = -ENOMEM;
1017 		goto err_out;
1018 	}
1019 	return 0;
1020 
1021 err_out:
1022 	debugfs_remove_recursive(c->dent);
1023 	return err;
1024 }
1025 
1026 static int clk_debugfs_register(struct clk *c)
1027 {
1028 	int err;
1029 	struct clk *pa = c->parent;
1030 
1031 	if (pa && !pa->dent) {
1032 		err = clk_debugfs_register(pa);
1033 		if (err)
1034 			return err;
1035 	}
1036 
1037 	if (!c->dent) {
1038 		err = clk_debugfs_register_one(c);
1039 		if (err)
1040 			return err;
1041 	}
1042 	return 0;
1043 }
1044 
1045 static int __init clk_debugfs_init(void)
1046 {
1047 	struct clk *c;
1048 	struct dentry *d;
1049 	int err;
1050 
1051 	d = debugfs_create_dir("clock", NULL);
1052 	if (!d)
1053 		return -ENOMEM;
1054 	clk_debugfs_root = d;
1055 
1056 	list_for_each_entry(c, &clocks, node) {
1057 		err = clk_debugfs_register(c);
1058 		if (err)
1059 			goto err_out;
1060 	}
1061 
1062 	d = debugfs_create_file("summary", S_IRUGO,
1063 		d, NULL, &debug_clock_fops);
1064 	if (!d)
1065 		return -ENOMEM;
1066 
1067 	return 0;
1068 err_out:
1069 	debugfs_remove_recursive(clk_debugfs_root);
1070 	return err;
1071 }
1072 late_initcall(clk_debugfs_init);
1073 
1074 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
1075 
1076