xref: /openbmc/linux/arch/arm/mach-omap2/clock.c (revision c4ceedcb)
1 /*
2  *  linux/arch/arm/mach-omap2/clock.c
3  *
4  *  Copyright (C) 2005-2008 Texas Instruments, Inc.
5  *  Copyright (C) 2004-2010 Nokia Corporation
6  *
7  *  Contacts:
8  *  Richard Woodruff <r-woodruff2@ti.com>
9  *  Paul Walmsley
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License version 2 as
13  * published by the Free Software Foundation.
14  */
15 #undef DEBUG
16 
17 #include <linux/kernel.h>
18 #include <linux/export.h>
19 #include <linux/list.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/clk.h>
24 #include <linux/io.h>
25 #include <linux/bitops.h>
26 
27 #include <asm/cpu.h>
28 
29 #include <plat/prcm.h>
30 
31 #include <trace/events/power.h>
32 
33 #include "soc.h"
34 #include "clockdomain.h"
35 #include "clock.h"
36 #include "cm.h"
37 #include "cm2xxx.h"
38 #include "cm3xxx.h"
39 #include "cm-regbits-24xx.h"
40 #include "cm-regbits-34xx.h"
41 #include "common.h"
42 
43 /*
44  * MAX_MODULE_ENABLE_WAIT: maximum of number of microseconds to wait
45  * for a module to indicate that it is no longer in idle
46  */
47 #define MAX_MODULE_ENABLE_WAIT		100000
48 
49 u16 cpu_mask;
50 
51 /*
52  * clkdm_control: if true, then when a clock is enabled in the
53  * hardware, its clockdomain will first be enabled; and when a clock
54  * is disabled in the hardware, its clockdomain will be disabled
55  * afterwards.
56  */
57 static bool clkdm_control = true;
58 
59 static LIST_HEAD(clocks);
60 static DEFINE_MUTEX(clocks_mutex);
61 static DEFINE_SPINLOCK(clockfw_lock);
62 
63 /*
64  * OMAP2+ specific clock functions
65  */
66 
67 /* Private functions */
68 
69 
70 /**
71  * _wait_idlest_generic - wait for a module to leave the idle state
72  * @reg: virtual address of module IDLEST register
73  * @mask: value to mask against to determine if the module is active
74  * @idlest: idle state indicator (0 or 1) for the clock
75  * @name: name of the clock (for printk)
76  *
77  * Wait for a module to leave idle, where its idle-status register is
78  * not inside the CM module.  Returns 1 if the module left idle
79  * promptly, or 0 if the module did not leave idle before the timeout
80  * elapsed.  XXX Deprecated - should be moved into drivers for the
81  * individual IP block that the IDLEST register exists in.
82  */
83 static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
84 				const char *name)
85 {
86 	int i = 0, ena = 0;
87 
88 	ena = (idlest) ? 0 : mask;
89 
90 	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
91 			  MAX_MODULE_ENABLE_WAIT, i);
92 
93 	if (i < MAX_MODULE_ENABLE_WAIT)
94 		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
95 			 name, i);
96 	else
97 		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
98 		       name, MAX_MODULE_ENABLE_WAIT);
99 
100 	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
101 };
102 
103 /**
104  * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
105  * @clk: struct clk * belonging to the module
106  *
107  * If the necessary clocks for the OMAP hardware IP block that
108  * corresponds to clock @clk are enabled, then wait for the module to
109  * indicate readiness (i.e., to leave IDLE).  This code does not
110  * belong in the clock code and will be moved in the medium term to
111  * module-dependent code.  No return value.
112  */
113 static void _omap2_module_wait_ready(struct clk *clk)
114 {
115 	void __iomem *companion_reg, *idlest_reg;
116 	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
117 	s16 prcm_mod;
118 	int r;
119 
120 	/* Not all modules have multiple clocks that their IDLEST depends on */
121 	if (clk->ops->find_companion) {
122 		clk->ops->find_companion(clk, &companion_reg, &other_bit);
123 		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
124 			return;
125 	}
126 
127 	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);
128 
129 	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
130 	if (r) {
131 		/* IDLEST register not in the CM module */
132 		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
133 				     clk->name);
134 	} else {
135 		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
136 	};
137 }
138 
139 /* Public functions */
140 
141 /**
142  * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
143  * @clk: OMAP clock struct ptr to use
144  *
145  * Convert a clockdomain name stored in a struct clk 'clk' into a
146  * clockdomain pointer, and save it into the struct clk.  Intended to be
147  * called during clk_register().  No return value.
148  */
149 void omap2_init_clk_clkdm(struct clk *clk)
150 {
151 	struct clockdomain *clkdm;
152 	const char *clk_name;
153 
154 	if (!clk->clkdm_name)
155 		return;
156 
157 	clk_name = __clk_get_name(clk);
158 
159 	clkdm = clkdm_lookup(clk->clkdm_name);
160 	if (clkdm) {
161 		pr_debug("clock: associated clk %s to clkdm %s\n",
162 			 clk_name, clk->clkdm_name);
163 		clk->clkdm = clkdm;
164 	} else {
165 		pr_debug("clock: could not associate clk %s to clkdm %s\n",
166 			 clk_name, clk->clkdm_name);
167 	}
168 }
169 
170 /**
171  * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
172  *
173  * Prevent the OMAP clock code from calling into the clockdomain code
174  * when a hardware clock in that clockdomain is enabled or disabled.
175  * Intended to be called at init time from omap*_clk_init().  No
176  * return value.
177  */
void __init omap2_clk_disable_clkdm_control(void)
{
	/* Checked in omap2_clk_enable()/omap2_clk_disable() before
	 * calling clkdm_clk_enable()/clkdm_clk_disable() */
	clkdm_control = false;
}
182 
183 /**
184  * omap2_clk_dflt_find_companion - find companion clock to @clk
185  * @clk: struct clk * to find the companion clock of
186  * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
187  * @other_bit: u8 ** to return the companion clock bit shift in
188  *
189  * Note: We don't need special code here for INVERT_ENABLE for the
190  * time being since INVERT_ENABLE only applies to clocks enabled by
191  * CM_CLKEN_PLL
192  *
193  * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
194  * just a matter of XORing the bits.
195  *
196  * Some clocks don't have companion clocks.  For example, modules with
197  * only an interface clock (such as MAILBOXES) don't have a companion
198  * clock.  Right now, this code relies on the hardware exporting a bit
199  * in the correct companion register that indicates that the
200  * nonexistent 'companion clock' is active.  Future patches will
201  * associate this type of code with per-module data structures to
202  * avoid this issue, and remove the casts.  No return value.
203  */
204 void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
205 				   u8 *other_bit)
206 {
207 	u32 r;
208 
209 	/*
210 	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
211 	 * it's just a matter of XORing the bits.
212 	 */
213 	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
214 
215 	*other_reg = (__force void __iomem *)r;
216 	*other_bit = clk->enable_bit;
217 }
218 
219 /**
220  * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
221  * @clk: struct clk * to find IDLEST info for
222  * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
223  * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
224  * @idlest_val: u8 * to return the idle status indicator
225  *
226  * Return the CM_IDLEST register address and bit shift corresponding
227  * to the module that "owns" this clock.  This default code assumes
228  * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
229  * the IDLEST register address ID corresponds to the CM_*CLKEN
230  * register address ID (e.g., that CM_FCLKEN2 corresponds to
231  * CM_IDLEST2).  This is not true for all modules.  No return value.
232  */
233 void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
234 				u8 *idlest_bit, u8 *idlest_val)
235 {
236 	u32 r;
237 
238 	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
239 	*idlest_reg = (__force void __iomem *)r;
240 	*idlest_bit = clk->enable_bit;
241 
242 	/*
243 	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
244 	 * 34xx reverses this, just to keep us on our toes
245 	 * AM35xx uses both, depending on the module.
246 	 */
247 	if (cpu_is_omap24xx())
248 		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
249 	else if (cpu_is_omap34xx())
250 		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
251 	else
252 		BUG();
253 
254 }
255 
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @clk: struct clk * of the clock to enable
 *
 * Sets (or, for INVERT_ENABLE clocks, clears) the clock's enable bit,
 * posts the write with a readback, and, if the clkops provide
 * find_idlest, waits for the associated module to leave idle.
 * Returns 0.
 */
int omap2_dflt_clk_enable(struct clk *clk)
{
	u32 v;

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	/* Read-modify-write of the enable bit */
	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	/* Only wait for module readiness if the clkops know how to check */
	if (clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;
}
279 
/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @clk: struct clk * of the clock to disable
 *
 * Clears (or, for INVERT_ENABLE clocks, sets) the clock's enable bit.
 * No return value.
 */
void omap2_dflt_clk_disable(struct clk *clk)
{
	u32 v;

	if (!clk->enable_reg) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n", clk->name);
		return;
	}

	/* Read-modify-write of the enable bit, inverted vs. enable */
	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */
}
301 
/* Default clkops: toggle the enable bit, and wait for the module to
 * leave idle after enabling (via find_idlest) */
const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_companion	= omap2_clk_dflt_find_companion,
	.find_idlest	= omap2_clk_dflt_find_idlest,
};

/* As above, but with no find_idlest, so enable does not wait for the
 * module to become ready */
const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};
313 
314 /**
315  * omap2_clk_disable - disable a clock, if the system is not using it
316  * @clk: struct clk * to disable
317  *
318  * Decrements the usecount on struct clk @clk.  If there are no users
319  * left, call the clkops-specific clock disable function to disable it
320  * in hardware.  If the clock is part of a clockdomain (which they all
321  * should be), request that the clockdomain be disabled.  (It too has
322  * a usecount, and so will not be disabled in the hardware until it no
323  * longer has any users.)  If the clock has a parent clock (most of
324  * them do), then call ourselves, recursing on the parent clock.  This
325  * can cause an entire branch of the clock tree to be powered off by
326  * simply disabling one clock.  Intended to be called with the clockfw_lock
327  * spinlock held.  No return value.
328  */
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?", clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	clk->usecount--;

	/* Only the 1 -> 0 transition touches the hardware */
	if (clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	/* Disable order is the reverse of omap2_clk_enable(): this
	 * clock first, then its clockdomain, then its parent clock */
	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	/* Recurse: drop this clock's reference on its parent */
	if (clk->parent)
		omap2_clk_disable(clk->parent);
}
356 
357 /**
358  * omap2_clk_enable - request that the system enable a clock
359  * @clk: struct clk * to enable
360  *
361  * Increments the usecount on struct clk @clk.  If there were no users
362  * previously, then recurse up the clock tree, enabling all of the
363  * clock's parents and all of the parent clockdomains, and finally,
364  * enabling @clk's clockdomain, and @clk itself.  Intended to be
365  * called with the clockfw_lock spinlock held.  Returns 0 upon success
366  * or a negative error code upon failure.
367  */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	/* Only the 0 -> 1 transition touches the hardware */
	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	/* Enable order: parent clock, then clockdomain, then this
	 * clock; the labels below unwind in exactly the reverse order */
	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
			     clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

/* Error unwind: undo only what was successfully done above */
oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
422 
423 /* Given a clock and a rate apply a clock specific rounding function */
424 long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
425 {
426 	if (clk->round_rate)
427 		return clk->round_rate(clk, rate);
428 
429 	return clk->rate;
430 }
431 
432 /* Set the clock rate for a clock source */
433 int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
434 {
435 	int ret = -EINVAL;
436 
437 	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);
438 
439 	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
440 	if (clk->set_rate) {
441 		trace_clock_set_rate(clk->name, rate, smp_processor_id());
442 		ret = clk->set_rate(clk, rate);
443 	}
444 
445 	return ret;
446 }
447 
448 int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
449 {
450 	if (!clk->clksel)
451 		return -EINVAL;
452 
453 	if (clk->parent == new_parent)
454 		return 0;
455 
456 	return omap2_clksel_set_parent(clk, new_parent);
457 }
458 
459 /*
460  * OMAP2+ clock reset and init functions
461  */
462 
463 #ifdef CONFIG_OMAP_RESET_CLOCKS
/* Disable a clock that the bootloader left enabled but nothing uses.
 * Called with clockfw_lock held by clk_disable_unused(). */
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	/* Value of the enable bit when the clock is disabled: set for
	 * INVERT_ENABLE clocks, clear otherwise */
	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;	/* already disabled in hardware */

	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		/* NOTE(review): 34xx goes through the full
		 * enable/disable sequence, presumably so clockdomain
		 * state is updated as well -- confirm rationale */
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else {
		clk->ops->disable(clk);
	}
	if (clk->clkdm != NULL)
		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
484 #endif
485 
486 /**
487  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
488  * @mpurate_ck_name: clk name of the clock to change rate
489  *
490  * Change the ARM MPU clock rate to the rate specified on the command
491  * line, if one was specified.  @mpurate_ck_name should be
492  * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
493  * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
494  * handled by the virt_prcm_set clock, but this should be handled by
495  * the OPP layer.  XXX This is intended to be handled by the OPP layer
496  * code in the near future and should be removed from the clock code.
497  * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
498  * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
499  * cannot be found, or 0 upon success.
500  */
501 int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
502 {
503 	struct clk *mpurate_ck;
504 	int r;
505 
506 	if (!mpurate)
507 		return -EINVAL;
508 
509 	mpurate_ck = clk_get(NULL, mpurate_ck_name);
510 	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
511 		return -ENOENT;
512 
513 	r = clk_set_rate(mpurate_ck, mpurate);
514 	if (IS_ERR_VALUE(r)) {
515 		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
516 		     mpurate_ck->name, mpurate, r);
517 		clk_put(mpurate_ck);
518 		return -EINVAL;
519 	}
520 
521 	calibrate_delay();
522 	recalculate_root_clocks();
523 
524 	clk_put(mpurate_ck);
525 
526 	return 0;
527 }
528 
529 /**
530  * omap2_clk_print_new_rates - print summary of current clock tree rates
531  * @hfclkin_ck_name: clk name for the off-chip HF oscillator
532  * @core_ck_name: clk name for the on-chip CORE_CLK
533  * @mpu_ck_name: clk name for the ARM MPU clock
534  *
535  * Prints a short message to the console with the HFCLKIN oscillator
536  * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
537  * Called by the boot-time MPU rate switching code.   XXX This is intended
538  * to be handled by the OPP layer code in the near future and should be
539  * removed from the clock code.  No return value.
540  */
541 void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
542 				      const char *core_ck_name,
543 				      const char *mpu_ck_name)
544 {
545 	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
546 	unsigned long hfclkin_rate;
547 
548 	mpu_ck = clk_get(NULL, mpu_ck_name);
549 	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
550 		return;
551 
552 	core_ck = clk_get(NULL, core_ck_name);
553 	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
554 		return;
555 
556 	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
557 	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
558 		return;
559 
560 	hfclkin_rate = clk_get_rate(hfclkin_ck);
561 
562 	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
563 		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
564 		(clk_get_rate(core_ck) / 1000000),
565 		(clk_get_rate(mpu_ck) / 1000000));
566 }
567 
568 /* Common data */
569 
570 int clk_enable(struct clk *clk)
571 {
572 	unsigned long flags;
573 	int ret;
574 
575 	if (clk == NULL || IS_ERR(clk))
576 		return -EINVAL;
577 
578 	spin_lock_irqsave(&clockfw_lock, flags);
579 	ret = omap2_clk_enable(clk);
580 	spin_unlock_irqrestore(&clockfw_lock, flags);
581 
582 	return ret;
583 }
584 EXPORT_SYMBOL(clk_enable);
585 
586 void clk_disable(struct clk *clk)
587 {
588 	unsigned long flags;
589 
590 	if (clk == NULL || IS_ERR(clk))
591 		return;
592 
593 	spin_lock_irqsave(&clockfw_lock, flags);
594 	if (clk->usecount == 0) {
595 		pr_err("Trying disable clock %s with 0 usecount\n",
596 		       clk->name);
597 		WARN_ON(1);
598 		goto out;
599 	}
600 
601 	omap2_clk_disable(clk);
602 
603 out:
604 	spin_unlock_irqrestore(&clockfw_lock, flags);
605 }
606 EXPORT_SYMBOL(clk_disable);
607 
608 unsigned long clk_get_rate(struct clk *clk)
609 {
610 	unsigned long flags;
611 	unsigned long ret;
612 
613 	if (clk == NULL || IS_ERR(clk))
614 		return 0;
615 
616 	spin_lock_irqsave(&clockfw_lock, flags);
617 	ret = clk->rate;
618 	spin_unlock_irqrestore(&clockfw_lock, flags);
619 
620 	return ret;
621 }
622 EXPORT_SYMBOL(clk_get_rate);
623 
624 /*
625  * Optional clock functions defined in include/linux/clk.h
626  */
627 
628 long clk_round_rate(struct clk *clk, unsigned long rate)
629 {
630 	unsigned long flags;
631 	long ret;
632 
633 	if (clk == NULL || IS_ERR(clk))
634 		return 0;
635 
636 	spin_lock_irqsave(&clockfw_lock, flags);
637 	ret = omap2_clk_round_rate(clk, rate);
638 	spin_unlock_irqrestore(&clockfw_lock, flags);
639 
640 	return ret;
641 }
642 EXPORT_SYMBOL(clk_round_rate);
643 
644 int clk_set_rate(struct clk *clk, unsigned long rate)
645 {
646 	unsigned long flags;
647 	int ret = -EINVAL;
648 
649 	if (clk == NULL || IS_ERR(clk))
650 		return ret;
651 
652 	spin_lock_irqsave(&clockfw_lock, flags);
653 	ret = omap2_clk_set_rate(clk, rate);
654 	if (ret == 0)
655 		propagate_rate(clk);
656 	spin_unlock_irqrestore(&clockfw_lock, flags);
657 
658 	return ret;
659 }
660 EXPORT_SYMBOL(clk_set_rate);
661 
662 int clk_set_parent(struct clk *clk, struct clk *parent)
663 {
664 	unsigned long flags;
665 	int ret = -EINVAL;
666 
667 	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
668 		return ret;
669 
670 	spin_lock_irqsave(&clockfw_lock, flags);
671 	if (clk->usecount == 0) {
672 		ret = omap2_clk_set_parent(clk, parent);
673 		if (ret == 0)
674 			propagate_rate(clk);
675 	} else {
676 		ret = -EBUSY;
677 	}
678 	spin_unlock_irqrestore(&clockfw_lock, flags);
679 
680 	return ret;
681 }
682 EXPORT_SYMBOL(clk_set_parent);
683 
684 struct clk *clk_get_parent(struct clk *clk)
685 {
686 	return clk->parent;
687 }
688 EXPORT_SYMBOL(clk_get_parent);
689 
690 /*
691  * OMAP specific clock functions shared between omap1 and omap2
692  */
693 
694 int __initdata mpurate;
695 
696 /*
697  * By default we use the rate set by the bootloader.
698  * You can override this with mpurate= cmdline option.
699  */
700 static int __init omap_clk_setup(char *str)
701 {
702 	get_option(&str, &mpurate);
703 
704 	if (!mpurate)
705 		return 1;
706 
707 	if (mpurate < 1000)
708 		mpurate *= 1000000;
709 
710 	return 1;
711 }
712 __setup("mpurate=", omap_clk_setup);
713 
714 /* Used for clocks that always have same value as the parent clock */
715 unsigned long followparent_recalc(struct clk *clk)
716 {
717 	return clk->parent->rate;
718 }
719 
720 /*
721  * Used for clocks that have the same value as the parent clock,
722  * divided by some factor
723  */
724 unsigned long omap_fixed_divisor_recalc(struct clk *clk)
725 {
726 	WARN_ON(!clk->fixed_div);
727 
728 	return clk->parent->rate / clk->fixed_div;
729 }
730 
/* Move @child under @parent in the clock tree; a NULL @parent leaves
 * the child detached from any sibling list */
void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}
741 
742 /* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	/* Depth-first walk: recompute each child's rate, then recurse
	 * into that child's own subtree */
	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}
753 
754 static LIST_HEAD(root_clks);
755 
756 /**
757  * recalculate_root_clocks - recalculate and propagate all root clocks
758  *
759  * Recalculates all root clocks (clocks with no parent), which if the
760  * clock's .recalc is set correctly, should also propagate their rates.
761  * Called at init.
762  */
763 void recalculate_root_clocks(void)
764 {
765 	struct clk *clkp;
766 
767 	list_for_each_entry(clkp, &root_clks, sibling) {
768 		if (clkp->recalc)
769 			clkp->rate = clkp->recalc(clkp);
770 		propagate_rate(clkp);
771 	}
772 }
773 
774 /**
775  * clk_preinit - initialize any fields in the struct clk before clk init
776  * @clk: struct clk * to initialize
777  *
778  * Initialize any struct clk fields needed before normal clk initialization
779  * can run.  No return value.
780  */
void clk_preinit(struct clk *clk)
{
	/* The children list must be valid before clk_register() links
	 * other clocks onto it */
	INIT_LIST_HEAD(&clk->children);
}
785 
786 int clk_register(struct clk *clk)
787 {
788 	if (clk == NULL || IS_ERR(clk))
789 		return -EINVAL;
790 
791 	/*
792 	 * trap out already registered clocks
793 	 */
794 	if (clk->node.next || clk->node.prev)
795 		return 0;
796 
797 	mutex_lock(&clocks_mutex);
798 	if (clk->parent)
799 		list_add(&clk->sibling, &clk->parent->children);
800 	else
801 		list_add(&clk->sibling, &root_clks);
802 
803 	list_add(&clk->node, &clocks);
804 	if (clk->init)
805 		clk->init(clk);
806 	mutex_unlock(&clocks_mutex);
807 
808 	return 0;
809 }
810 EXPORT_SYMBOL(clk_register);
811 
812 void clk_unregister(struct clk *clk)
813 {
814 	if (clk == NULL || IS_ERR(clk))
815 		return;
816 
817 	mutex_lock(&clocks_mutex);
818 	list_del(&clk->sibling);
819 	list_del(&clk->node);
820 	mutex_unlock(&clocks_mutex);
821 }
822 EXPORT_SYMBOL(clk_unregister);
823 
824 void clk_enable_init_clocks(void)
825 {
826 	struct clk *clkp;
827 
828 	list_for_each_entry(clkp, &clocks, node)
829 		if (clkp->flags & ENABLE_ON_INIT)
830 			clk_enable(clkp);
831 }
832 
833 /**
834  * omap_clk_get_by_name - locate OMAP struct clk by its name
835  * @name: name of the struct clk to locate
836  *
837  * Locate an OMAP struct clk by its name.  Assumes that struct clk
838  * names are unique.  Returns NULL if not found or a pointer to the
839  * struct clk if found.
840  */
841 struct clk *omap_clk_get_by_name(const char *name)
842 {
843 	struct clk *c;
844 	struct clk *ret = NULL;
845 
846 	mutex_lock(&clocks_mutex);
847 
848 	list_for_each_entry(c, &clocks, node) {
849 		if (!strcmp(c->name, name)) {
850 			ret = c;
851 			break;
852 		}
853 	}
854 
855 	mutex_unlock(&clocks_mutex);
856 
857 	return ret;
858 }
859 
860 int omap_clk_enable_autoidle_all(void)
861 {
862 	struct clk *c;
863 	unsigned long flags;
864 
865 	spin_lock_irqsave(&clockfw_lock, flags);
866 
867 	list_for_each_entry(c, &clocks, node)
868 		if (c->ops->allow_idle)
869 			c->ops->allow_idle(c);
870 
871 	spin_unlock_irqrestore(&clockfw_lock, flags);
872 
873 	return 0;
874 }
875 
876 int omap_clk_disable_autoidle_all(void)
877 {
878 	struct clk *c;
879 	unsigned long flags;
880 
881 	spin_lock_irqsave(&clockfw_lock, flags);
882 
883 	list_for_each_entry(c, &clocks, node)
884 		if (c->ops->deny_idle)
885 			c->ops->deny_idle(c);
886 
887 	spin_unlock_irqrestore(&clockfw_lock, flags);
888 
889 	return 0;
890 }
891 
892 /*
893  * Low level helpers
894  */
/* No-op enable: always reports success */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

/* No-op disable */
static void clkll_disable_null(struct clk *clk)
{
}

/* Operations for clocks with nothing to do on enable/disable */
const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};
908 
909 /*
910  * Dummy clock
911  *
912  * Used for clock aliases that are needed on some OMAPs, but not others
913  */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,	/* no-op enable/disable */
};
918 
919 /*
920  *
921  */
922 
923 #ifdef CONFIG_OMAP_RESET_CLOCKS
924 /*
925  * Disable any unused clocks left on by the bootloader
926  */
927 static int __init clk_disable_unused(void)
928 {
929 	struct clk *ck;
930 	unsigned long flags;
931 
932 	pr_info("clock: disabling unused clocks to save power\n");
933 
934 	spin_lock_irqsave(&clockfw_lock, flags);
935 	list_for_each_entry(ck, &clocks, node) {
936 		if (ck->ops == &clkops_null)
937 			continue;
938 
939 		if (ck->usecount > 0 || !ck->enable_reg)
940 			continue;
941 
942 		omap2_clk_disable_unused(ck);
943 	}
944 	spin_unlock_irqrestore(&clockfw_lock, flags);
945 
946 	return 0;
947 }
948 late_initcall(clk_disable_unused);
949 late_initcall(omap_clk_enable_autoidle_all);
950 #endif
951 
952 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
953 /*
954  *	debugfs support to trace clock tree hierarchy and attributes
955  */
956 
957 #include <linux/debugfs.h>
958 #include <linux/seq_file.h>
959 
960 static struct dentry *clk_debugfs_root;
961 
962 static int clk_dbg_show_summary(struct seq_file *s, void *unused)
963 {
964 	struct clk *c;
965 	struct clk *pa;
966 
967 	mutex_lock(&clocks_mutex);
968 	seq_printf(s, "%-30s %-30s %-10s %s\n",
969 		   "clock-name", "parent-name", "rate", "use-count");
970 
971 	list_for_each_entry(c, &clocks, node) {
972 		pa = c->parent;
973 		seq_printf(s, "%-30s %-30s %-10lu %d\n",
974 			   c->name, pa ? pa->name : "none", c->rate,
975 			   c->usecount);
976 	}
977 	mutex_unlock(&clocks_mutex);
978 
979 	return 0;
980 }
981 
/* debugfs open callback: bind the seq_file summary printer */
static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}
986 
/* Read-only seq_file interface for the clock summary file */
static const struct file_operations debug_clock_fops = {
	.open           = clk_dbg_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
993 
/* Create this clock's debugfs directory (nested under its parent's,
 * mirroring the clock tree) and its attribute files */
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	/* NOTE(review): the casts below assume usecount is one byte
	 * wide and rate/flags are 32 bits on this platform -- confirm
	 * against the struct clk definition */
	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	/* Remove the whole per-clock directory, including any files
	 * created before the failure */
	debugfs_remove_recursive(c->dent);
	return err;
}
1026 
1027 static int clk_debugfs_register(struct clk *c)
1028 {
1029 	int err;
1030 	struct clk *pa = c->parent;
1031 
1032 	if (pa && !pa->dent) {
1033 		err = clk_debugfs_register(pa);
1034 		if (err)
1035 			return err;
1036 	}
1037 
1038 	if (!c->dent) {
1039 		err = clk_debugfs_register_one(c);
1040 		if (err)
1041 			return err;
1042 	}
1043 	return 0;
1044 }
1045 
1046 static int __init clk_debugfs_init(void)
1047 {
1048 	struct clk *c;
1049 	struct dentry *d;
1050 	int err;
1051 
1052 	d = debugfs_create_dir("clock", NULL);
1053 	if (!d)
1054 		return -ENOMEM;
1055 	clk_debugfs_root = d;
1056 
1057 	list_for_each_entry(c, &clocks, node) {
1058 		err = clk_debugfs_register(c);
1059 		if (err)
1060 			goto err_out;
1061 	}
1062 
1063 	d = debugfs_create_file("summary", S_IRUGO,
1064 		d, NULL, &debug_clock_fops);
1065 	if (!d)
1066 		return -ENOMEM;
1067 
1068 	return 0;
1069 err_out:
1070 	debugfs_remove_recursive(clk_debugfs_root);
1071 	return err;
1072 }
1073 late_initcall(clk_debugfs_init);
1074 
1075 #endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
1076 
1077