/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 *  Copyright (C) 2010  Magnus Damm
 *  Copyright (C) 2010 - 2012  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}
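
/*
 * Illustrative sketch (not part of the driver): a clock whose control
 * register is narrower than 32 bits opts into the matching access width
 * through clk->flags; everything else falls back to 32-bit accesses.
 * The clock core is expected to derive mapped_reg from the register the
 * SoC code describes when the clock is registered; the address below is
 * hypothetical.
 *
 *	static struct clk hypothetical_clk = {
 *		.enable_reg	= (void __iomem *)0xa4150080,
 *		.flags		= CLK_ENABLE_REG_16BIT,
 *	};
 */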

static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}
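
/*
 * Usage sketch (illustrative only; clock names and the register address
 * are hypothetical): SoC clock code describes each module stop (MSTP)
 * gate with its control register and bit and hands the array to
 * sh_clk_mstp_register() during setup.  Clearing the bit enables the
 * module clock, setting it stops it, as implemented above.
 *
 *	static struct clk mstp_clks[] = {
 *		[0] = {
 *			.parent		= &peripheral_clk,
 *			.enable_reg	= (void __iomem *)0xa4150030,
 *			.enable_bit	= 0,
 *		},
 *	};
 *
 *	ret = sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */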

static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = sh_clk_read(clk) & 0x003f;

	return clk->freq_table[idx].frequency;
}
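
/*
 * Worked example (hypothetical numbers): with a 48 MHz parent and the low
 * six bits of the register reading 3, the table built above maps index 3
 * to a divide-by-4, so the recalculated rate is 48 MHz / 4 = 12 MHz.
 */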

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~0x3f;
	value |= idx;
	sh_clk_write(value, clk);
	return 0;
}
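
/*
 * Worked example (hypothetical numbers): requesting 12 MHz on a 48 MHz
 * parent makes clk_rate_table_find() return index 3 (divide-by-4), so
 * only the low six divider bits are rewritten to 3; the stop bit and the
 * parent select field keep their current values.
 */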

static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate);
	if (ret == 0) {
		value = sh_clk_read(clk);
		value &= ~0x100; /* clear stop bit to enable clock */
		sh_clk_write(value, clk);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = sh_clk_read(clk);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	sh_clk_write(value, clk);
}

static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};

static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val  = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}
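
/*
 * Worked example (hypothetical layout): with src_shift = 6 and
 * src_width = 2 the parent select field occupies bits [7:6].  A value of
 * 1 read back from the register selects parent_table[1]; a value of 3
 * with only two entries in parent_table is rejected with -EINVAL.
 */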

static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
		ret = clk_register(clkp);
		if (ret < 0)
			break;

		ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
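
/*
 * Usage sketch (illustrative only; names, addresses and field layout are
 * hypothetical): a fixed-parent DIV6 clock only needs its control
 * register, while a reparentable one additionally lists the candidate
 * parents and the position of the parent select field.
 *
 *	static struct clk *vclk_parents[] = {
 *		&pll_clk, &extal_clk,
 *	};
 *
 *	static struct clk vclk = {
 *		.enable_reg	= (void __iomem *)0xa4150048,
 *		.parent_table	= vclk_parents,
 *		.parent_num	= ARRAY_SIZE(vclk_parents),
 *		.src_shift	= 6,
 *		.src_width	= 2,
 *	};
 *
 *	ret = sh_clk_div6_reparent_register(&vclk, 1);
 */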

static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}
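
/*
 * Worked example (hypothetical numbers): for a DIV4 clock whose 4-bit
 * divider field starts at enable_bit = 20, a field value of 2 selects
 * freq_table[2]; with a divisor table beginning { 2, 3, 4, 6, ... } that
 * corresponds to the parent rate divided by 4.
 */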

static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div4_table *d4t = clk->priv;
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	if (d4t->kick)
		d4t->kick(clk);

	return 0;
}
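
/*
 * The optional kick() callback in struct clk_div4_table lets SoC code
 * latch the new divider setting, typically by setting a "kick" bit in its
 * frequency control register.  A minimal sketch, with the mapped register
 * pointer and bit position purely hypothetical:
 *
 *	static void hypothetical_div4_kick(struct clk *clk)
 *	{
 *		iowrite32(ioread32(frqcr_base) | (1 << 31), frqcr_base);
 *	}
 */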

static int sh_clk_div4_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
}

static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}
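
/*
 * Usage sketch (illustrative only; divisors, names and addresses are
 * hypothetical): the SoC clock code supplies the divisor encoding shared
 * by its DIV4 clocks, plus an optional kick() hook, and registers each
 * clock with the shift of its 4-bit divider field in enable_bit.
 *
 *	static int div4_divisors[] = { 2, 3, 4, 6, 8, 12, 16, 24 };
 *
 *	static struct clk_div_mult_table div4_div_mult_table = {
 *		.divisors	= div4_divisors,
 *		.nr_divisors	= ARRAY_SIZE(div4_divisors),
 *	};
 *
 *	static struct clk_div4_table div4_table = {
 *		.div_mult_table	= &div4_div_mult_table,
 *	};
 *
 *	static struct clk div4_clks[] = {
 *		[0] = {
 *			.parent		= &pll_clk,
 *			.enable_reg	= (void __iomem *)0xa4150000,
 *			.enable_bit	= 20,
 *			.flags		= CLK_ENABLE_ON_INIT,
 *		},
 *	};
 *
 *	ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks),
 *				   &div4_table);
 */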