xref: /openbmc/linux/drivers/sh/clk/cpg.c (revision f7018c21)
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

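/* Clock stop bit in the DIV4/DIV6 control registers; setting it gates the clock */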
#define CPG_CKSTP_BIT	BIT(8)

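/*
 * Accessors for the clock enable/control register. The access width
 * (8, 16 or 32 bits) is selected by the CLK_ENABLE_REG_*BIT flags.
 */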
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

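/* Width-specific read helpers, used for polling the MSTP status register */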
static unsigned int r8(const void __iomem *addr)
{
	return ioread8(addr);
}

static unsigned int r16(const void __iomem *addr)
{
	return ioread16(addr);
}

static unsigned int r32(const void __iomem *addr)
{
	return ioread32(addr);
}

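/*
 * MSTP (module stop) gate clocks: clearing the enable bit ungates the
 * module clock. When a status register is provided, poll it until the
 * corresponding bit clears, giving up after 1000 iterations.
 */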
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = r8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = r16;
		else
			read = r32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();
		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

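/* Register an array of MSTP gate clocks, all sharing the common MSTP ops */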
int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

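/*
 * Recalculate a divider clock's rate: rebuild the frequency table from
 * the div/mult table, then read the current divisor index back from the
 * control register.
 */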
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

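/*
 * Program the divisor index that matches the requested rate, then invoke
 * the optional kick callback supplied through the div table.
 */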
static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

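/*
 * Enable/disable via the CKSTP bit. A DIV6 clock may have had its
 * divisor field masked on disable, so reprogram the rate before
 * clearing CKSTP.
 */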
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * field is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};

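/*
 * Deduce the currently selected parent from the SRC field in the
 * control register and report it to the clock framework.
 */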
static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

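/*
 * Common registration helper for divider clocks: allocate cpufreq
 * frequency tables (one per clock, each with a terminating entry),
 * then hook up the ops, the div table and the initial parent.
 */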
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table	= &div6_div_mult_table,
};

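/*
 * Switch a DIV6 clock to a new parent: look the parent up in
 * parent_table, program the SRC field accordingly and rebuild the
 * frequency table.
 */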
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* We really need a better way to determine the parent index; for
	 * now assume an internal parent comes with CLK_ENABLE_ON_INIT set,
	 * while no CLK_ENABLE_ON_INIT means an external clock.
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

/* FSI-DIV */
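/*
 * The FSI divider register (reached through clk->mapping) holds the
 * divisor in bits [31:16]; a value below 2 passes the parent rate
 * through, and the low bits act as the enable control.
 */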
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value  = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};

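/*
 * Register FSI-DIV clocks. Each clock's enable_reg initially carries the
 * physical register address (set up by SH_CLK_FSIDIV()); move it into a
 * clk_mapping and clear enable_reg before registering.
 */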
int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {

		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys		= (phys_addr_t)clks[i].enable_reg;
		map->len		= 8;

		clks[i].enable_reg	= 0; /* remove .enable_reg */
		clks[i].ops		= &fsidiv_clk_ops;
		clks[i].mapping		= map;

		clk_register(&clks[i]);
	}

	return 0;
}