/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}
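
/*
 * Illustrative sketch only (not part of the original code): building a
 * rate table for a clock that can divide its parent by 2, 4 or 8.  All
 * 'example_*' names below are hypothetical.
 *
 *	static unsigned int example_divisors[] = { 2, 4, 8 };
 *
 *	static struct clk_div_mult_table example_div_mult = {
 *		.divisors	= example_divisors,
 *		.nr_divisors	= ARRAY_SIZE(example_divisors),
 *	};
 *
 *	(one extra slot is needed for the CPUFREQ_TABLE_END terminator)
 *	static struct cpufreq_frequency_table
 *		example_freqs[ARRAY_SIZE(example_divisors) + 1];
 *
 *	clk_rate_table_build(clk, example_freqs,
 *			     ARRAY_SIZE(example_divisors),
 *			     &example_div_mult, NULL);
 *
 * Passing a NULL bitmap leaves every entry valid; a zero divisor or
 * multiplier marks the corresponding entry CPUFREQ_ENTRY_INVALID.
 */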

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

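/*
 * Walk each candidate position in [r->min, r->max], fetching the
 * frequency from r->func().  The trailing if/else exists so that
 * positions for which r->func() returns 0 (invalid entries) are skipped
 * transparently, while the caller's loop body binds to the 'else'.
 */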
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}
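
/*
 * A minimal sketch (hypothetical helper name) of how a .round_rate op
 * typically wraps this, assuming clk->freq_table was previously filled
 * in by clk_rate_table_build():
 *
 *	static long example_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_table_round(clk, clk->freq_table, rate);
 *	}
 */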

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}
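
/*
 * Sketch (hypothetical name): a .round_rate op for a clock that simply
 * divides its parent by any integer from 1 to 64:
 *
 *	static long example_div_round_rate(struct clk *clk,
 *					   unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 *
 * Every divisor in the range is tried against the parent rate and the
 * closest match to 'rate' wins.
 */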

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * now do the debugfs renaming to reattach the child
	 * to the proper parent
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
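
/*
 * Typical consumer usage, as a sketch -- the "peripheral" connection id
 * is made up, and clk_get()/clk_put() come from the clock lookup layer
 * rather than this file:
 *
 *	struct clk *clk = clk_get(dev, "peripheral");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 *	ret = clk_enable(clk);
 *	if (ret) {
 *		clk_put(clk);
 *		return ret;
 *	}
 *
 *	... use the device ...
 *
 *	clk_disable(clk);
 *	clk_put(clk);
 *
 * enable/disable calls nest: the clock is only gated off once the last
 * enable has been balanced by a disable, and enabling a clock enables
 * its parents first.
 */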

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent); provided each
 * clock's .recalc is set correctly, the new rates propagate down to all
 * descendants. Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
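
/*
 * Illustrative sketch of registering a root clock with its own register
 * window.  The names, ops and addresses below are hypothetical; the real
 * users are the CPG clock tables in the SH and SH-Mobile platform code:
 *
 *	static struct clk_mapping example_mapping = {
 *		.phys	= 0xffc80000,
 *		.len	= 0x100,
 *	};
 *
 *	static struct clk example_clk = {
 *		.ops		= &example_clk_ops,
 *		.mapping	= &example_mapping,
 *		.flags		= CLK_ENABLE_ON_INIT,
 *	};
 *
 *	ret = clk_register(&example_clk);
 *
 * Child clocks may omit .mapping entirely and will inherit the mapping
 * of their root clock; parentless clocks without a mapping get the
 * dummy mapping.
 */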

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
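
/*
 * Rate changes are usually negotiated rather than forced.  A sketch of
 * the common pattern (target value hypothetical):
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *
 * clk_round_rate() asks the hardware what it can actually achieve;
 * clk_set_rate() then commits the rate and propagates the result to all
 * child clocks.
 */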

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %lu)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);
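
/*
 * Sketch of use (target and divider range are made-up values): pick the
 * parent table entry that best serves a 33 MHz consumer whose own
 * divider spans 1..32:
 *
 *	unsigned long best = 0, parent_rate = 0;
 *	long error;
 *
 *	error = clk_round_parent(clk, 33000000, &best, &parent_rate,
 *				 1, 32);
 *
 * On return 'best' holds the closest achievable rate, 'parent_rate' the
 * parent table frequency that produces it, and the return value is the
 * remaining error relative to the target.
 */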

#ifdef CONFIG_PM
static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	struct clk *clkp;

	switch (state.event) {
	case PM_EVENT_ON:
		/* Resuming from hibernation */
		if (prev_state.event != PM_EVENT_FREEZE)
			break;

		list_for_each_entry(clkp, &clock_list, node) {
			if (likely(clkp->ops)) {
				unsigned long rate = clkp->rate;

				if (likely(clkp->ops->set_parent))
					clkp->ops->set_parent(clkp,
						clkp->parent);
				if (likely(clkp->ops->set_rate))
					clkp->ops->set_rate(clkp, rate);
				else if (likely(clkp->ops->recalc))
					clkp->rate = clkp->ops->recalc(clkp);
			}
		}
		break;
	case PM_EVENT_FREEZE:
		break;
	case PM_EVENT_SUSPEND:
		break;
	}

	prev_state = state;
	return 0;
}

static int clks_sysdev_resume(struct sys_device *dev)
{
	return clks_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_class clks_sysdev_class = {
	.name = "clks",
};

static struct sysdev_driver clks_sysdev_driver = {
	.suspend = clks_sysdev_suspend,
	.resume = clks_sysdev_resume,
};

static struct sys_device clks_sysdev_dev = {
	.cls = &clks_sysdev_class,
};

static int __init clk_sysdev_init(void)
{
	sysdev_class_register(&clks_sysdev_class);
	sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
	sysdev_register(&clks_sysdev_dev);

	return 0;
}
subsys_initcall(clk_sysdev_init);
#endif

/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d, *child, *child_tmp;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	d = c->dentry;
	list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
		debugfs_remove(child);
	debugfs_remove(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);