/*
 *  linux/arch/arm/mach-omap1/clock.c
 *
 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified to use omap shared clock framework by
 *  Tony Lindgren <tony@atomide.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>

#include <asm/mach-types.h>

#include <plat/cpu.h>
#include <plat/usb.h>
#include <plat/clkdev_omap.h>

#include <mach/hardware.h>

#include "../plat-omap/sram.h"

#include "iomap.h"
#include "clock.h"
#include "opp.h"

__u32 arm_idlect1_mask;
struct clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

/*
 * Omap1 specific clock functions
 */

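/*
 * omap1_uart_recalc - report the current UART functional clock rate
 *
 * The UART functional clock runs at 48 MHz when its switch bit in
 * clk->enable_reg is set, and at 12 MHz otherwise.
 */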
unsigned long omap1_uart_recalc(struct clk *clk)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return val & (1 << clk->enable_bit) ? 48000000 : 12000000;
}

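/*
 * omap1_sossi_recalc - recalculate the SoSSI clock rate
 *
 * The divider is the 3-bit field at bits 17..19 of MOD_CONF_CTRL_1;
 * the effective divisor is the field value plus one.
 */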
unsigned long omap1_sossi_recalc(struct clk *clk)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return clk->parent->rate / div;
}

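/*
 * omap1_clk_allow_idle/omap1_clk_deny_idle - maintain arm_idlect1_mask
 *
 * Clocks flagged CLOCK_IDLE_CONTROL keep a no-idle refcount; the
 * corresponding ARM_IDLECT1 bit is set in arm_idlect1_mask only while
 * that count is zero, i.e. while the clock is allowed to idle.
 */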
static void omap1_clk_allow_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}

static int calc_dsor_exp(struct clk *clk, unsigned long rate)
{
	/* Note: If the target frequency is too low, this function will
	 * return 4, which is an invalid value. The caller must check for
	 * this value and act accordingly.
	 *
	 * Note: This function does not check for the following limitations
	 * set by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned long realrate;
	struct clk *parent;
	unsigned dsor_exp;

	parent = clk->parent;
	if (unlikely(parent == NULL))
		return -EIO;

	realrate = parent->rate;
	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}

unsigned long omap1_ckctl_recalc(struct clk *clk)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	return clk->parent->rate / dsor;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct clk *clk)
{
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	omap1_clk_enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	omap1_clk_disable(api_ck_p);

	return clk->parent->rate / dsor;
}

/* MPU virtual clock functions */
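/*
 * omap1_select_table_rate - switch to the highest table rate <= rate
 *
 * Scans omap1_rate_table for entries matching the current CPU and
 * reference clock, then reprograms CKCTL and DPLLCTL from SRAM via
 * omap_sram_reprogram_clock().
 */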
int omap1_select_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram DPLL.
	 * Reprogramming the DPLL is tricky, it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

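/*
 * omap1_clk_set_rate_dsp_domain - program a DSP domain CKCTL divider
 *
 * Writes the 2-bit divider exponent for this clock into DSP_CKCTL and
 * updates clk->rate.  Returns -EINVAL if the requested rate would need
 * a divider larger than 8.
 */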
int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);

	return 0;
}

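/*
 * omap1_clk_round_rate_ckctl_arm - round rate to an achievable CKCTL rate
 *
 * Returns the highest divided-down parent rate that does not exceed the
 * request, or parent_rate / 8 if the request is lower than that.
 */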
long omap1_clk_round_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp = calc_dsor_exp(clk, rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;
	return clk->parent->rate / (1 << dsor_exp);
}

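/*
 * omap1_clk_set_rate_ckctl_arm - program an ARM_CKCTL divider
 *
 * Like the DSP domain variant, except the new register value is passed
 * through verify_ckctl_value() so the inter-clock constraints on
 * ARM_CKCTL are preserved before it is written back.
 */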
int omap1_clk_set_rate_ckctl_arm(struct clk *clk, unsigned long rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(clk, rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = clk->parent->rate / (1 << dsor_exp);
	return 0;
}

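/*
 * omap1_round_to_table_rate - round rate against the MPU rate table
 *
 * Returns the highest table rate <= rate that matches the current CPU
 * and reference clock.  If the request is below every matching entry,
 * the lowest matching rate is returned; -EINVAL if nothing matches.
 */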
long omap1_round_to_table_rate(struct clk *clk, unsigned long rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * cannot be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

/* XXX Only needed on 1510 */
int omap1_set_uart_rate(struct clk *clk, unsigned long rate)
{
	unsigned int val;

	val = __raw_readl(clk->enable_reg);
	if (rate == 12000000)
		val &= ~(1 << clk->enable_bit);
	else if (rate == 48000000)
		val |= (1 << clk->enable_bit);
	else
		return -EINVAL;
	__raw_writel(val, clk->enable_reg);
	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
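/*
 * omap1_set_ext_clk_rate - set the MCLK or BCLK output rate
 *
 * Picks a divisor of the 96 MHz APLL clock via calc_ext_dsor() and
 * programs the corresponding RATIO_SEL bits in clk->enable_reg.
 */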
int omap1_set_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	return 0;
}

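/*
 * omap1_set_sossi_rate - set the SoSSI clock rate
 *
 * Derives the divider from the parent rate, rounding towards the slower
 * frequency, and programs bits 17..19 of MOD_CONF_CTRL_1.  Dividers
 * outside 1..8 return -EINVAL.
 */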
int omap1_set_sossi_rate(struct clk *clk, unsigned long rate)
{
	u32 l;
	int div;
	unsigned long p_rate;

	p_rate = clk->parent->rate;
	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;
	div--;
	if (div < 0 || div > 7)
		return -EINVAL;

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	return 0;
}

long omap1_round_ext_clk_rate(struct clk *clk, unsigned long rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

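/*
 * omap1_init_ext_clk - determine the current MCLK/BCLK rate at init
 *
 * Forces the clock onto the 96 MHz APLL source and derives the current
 * divisor from the RATIO_SEL bits already programmed in the register.
 */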
void omap1_init_ext_clk(struct clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;
}

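/*
 * omap1_clk_enable - enable a clock and, recursively, its parents
 *
 * The first enable of a clock also enables its parent chain and, for
 * clocks flagged CLOCK_NO_IDLE_PARENT, keeps the parent from idling.
 * On failure the usecount is rolled back.
 */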
int omap1_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = omap1_clk_enable(clk->parent);
			if (ret)
				goto err;

			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_deny_idle(clk->parent);
		}

		ret = clk->ops->enable(clk);
		if (ret) {
			if (clk->parent)
				omap1_clk_disable(clk->parent);
			goto err;
		}
	}
	return ret;

err:
	clk->usecount--;
	return ret;
}

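/*
 * omap1_clk_disable - disable a clock once its last user is gone
 *
 * Mirrors omap1_clk_enable(): when the usecount drops to zero the clock
 * is gated and the parent is released and allowed to idle again.
 */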
void omap1_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		clk->ops->disable(clk);
		if (likely(clk->parent)) {
			omap1_clk_disable(clk->parent);
			if (clk->flags & CLOCK_NO_IDLE_PARENT)
				omap1_clk_allow_idle(clk->parent);
		}
	}
}

static int omap1_clk_enable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return -EINVAL;
	}

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	return 0;
}

static void omap1_clk_disable_generic(struct clk *clk)
{
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

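/*
 * The enable registers of DSP domain clocks are only accessible while
 * api_ck is running, so these helpers bracket the generic enable and
 * disable operations with api_ck enable/disable.
 */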
static int omap1_clk_enable_dsp_domain(struct clk *clk)
{
	int retval;

	retval = omap1_clk_enable(api_ck_p);
	if (!retval) {
		retval = omap1_clk_enable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct clk *clk)
{
	if (omap1_clk_enable(api_ck_p) == 0) {
		omap1_clk_disable_generic(clk);
		omap1_clk_disable(api_ck_p);
	}
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);
	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS

void omap1_clk_disable_unused(struct clk *clk)
{
	__u32 regval32;

	/* Clocks in the DSP domain need api_ck. Just assume that the
	 * bootloader has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n",
			clk->name);
		return;
	}

	/* Is the clock already disabled? */
	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	if ((regval32 & (1 << clk->enable_bit)) == 0)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"... ", clk->name);
	clk->ops->disable(clk);
	printk(" done\n");
}

#endif

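/*
 * Generic clock API entry points declared in <linux/clk.h>.  These wrap
 * the omap1_* implementations above and serialize register access with
 * clockfw_lock.
 */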
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap1_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap1_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	WARN_ONCE(1, "clk_set_parent() not implemented for OMAP1\n");

	return -EINVAL;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/* now do the debugfs renaming to reattach the child
	   to the proper parent */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

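/*
 * clk_register/clk_unregister - add or remove a clock from the framework
 *
 * Registration links the clock into its parent's children list (or the
 * root list) and into the global clocks list under clocks_mutex.
 */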
int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

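/*
 * clk_enable_init_clocks - enable clocks flagged ENABLE_ON_INIT
 *
 * Called once at boot to bump the usecount of every registered clock
 * carrying the ENABLE_ON_INIT flag.
 */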
void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);
	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap1_clk_disable_unused(ck);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open           = clk_dbg_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

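/*
 * clk_debugfs_register_one - create the debugfs directory for one clock
 *
 * Each clock gets a directory, nested under its parent's directory,
 * with read-only "usecount", "rate" and "flags" entries.
 */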
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
		d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */