xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision d84bf9d6)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Ingenic SoC CGU driver
4  *
5  * Copyright (c) 2013-2015 Imagination Technologies
6  * Author: Paul Burton <paul.burton@mips.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
22 
23 #include "cgu.h"
24 
25 #define MHZ (1000 * 1000)
26 
27 static inline const struct ingenic_cgu_clk_info *
28 to_clk_info(struct ingenic_clk *clk)
29 {
30 	return &clk->cgu->clock_info[clk->idx];
31 }
32 
33 /**
34  * ingenic_cgu_gate_get() - get the value of clock gate register bit
35  * @cgu: reference to the CGU whose registers should be read
36  * @info: info struct describing the gate bit
37  *
38  * Retrieves the state of the clock gate bit described by info. The
39  * caller must hold cgu->lock.
40  *
41  * Return: true if the gate bit is set, else false.
42  */
43 static inline bool
44 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
45 		     const struct ingenic_cgu_gate_info *info)
46 {
47 	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
48 		^ info->clear_to_gate;
49 }
50 
51 /**
52  * ingenic_cgu_gate_set() - set the value of clock gate register bit
53  * @cgu: reference to the CGU whose registers should be modified
54  * @info: info struct describing the gate bit
55  * @val: non-zero to gate a clock, otherwise zero
56  *
57  * Sets the given gate bit in order to gate or ungate a clock.
58  *
59  * The caller must hold cgu->lock.
60  */
61 static inline void
62 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
63 		     const struct ingenic_cgu_gate_info *info, bool val)
64 {
65 	u32 clkgr = readl(cgu->base + info->reg);
66 
67 	if (val ^ info->clear_to_gate)
68 		clkgr |= BIT(info->bit);
69 	else
70 		clkgr &= ~BIT(info->bit);
71 
72 	writel(clkgr, cgu->base + info->reg);
73 }
74 
75 /*
76  * PLL operations
77  */
78 
/*
 * ingenic_pll_recalc_rate() - calculate the current PLL output rate
 *
 * Decodes the m (feedback multiplier), n (input divider) and od
 * (output post-divider) fields from the PLL control register and
 * computes:
 *
 *   rate = parent_rate * m * rate_multiplier / (n * od)
 *
 * If the PLL supports bypass and the bypass bit is set, the parent
 * rate is returned unchanged.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od, od_enc = 0;
	bool bypass;
	u32 ctl;

	/* This callback is only ever installed for PLL clocks. */
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* The register holds m - m_offset and n - n_offset. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;

	/* od uses a SoC-specific encoding; extract the raw field first. */
	if (pll_info->od_bits > 0) {
		od_enc = ctl >> pll_info->od_shift;
		od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	}

	/* A bypassed PLL simply forwards its parent clock. */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	/* Map the raw od encoding back to its table index; divider is od + 1. */
	for (od = 0; od < pll_info->od_max; od++)
		if (pll_info->od_encoding[od] == od_enc)
			break;

	/* if od_max = 0, od_bits should be 0 and od is fixed to 1. */
	if (pll_info->od_max == 0)
		BUG_ON(pll_info->od_bits != 0);
	else
		BUG_ON(od == pll_info->od_max); /* unknown od encoding */
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
128 
/*
 * ingenic_pll_calc_m_n_od() - default computation of PLL m/n/od values
 * @pll_info: description of the PLL's register fields
 * @rate: desired output rate in Hz
 * @parent_rate: input (parent) rate in Hz
 * @pm: filled in with the chosen multiplier
 * @pn: filled in with the chosen input divider
 * @pod: filled in with the chosen post-divider (always 1 here)
 *
 * Used when the SoC does not supply its own calc_m_n_od callback.
 */
static void
ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
			unsigned long rate, unsigned long parent_rate,
			unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	unsigned int m, n, od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	/*
	 * NOTE(review): this clamp permits n == 1 << n_bits, but
	 * ingenic_pll_set_rate() writes n - n_offset into an n_bits-wide
	 * field — confirm the value cannot overflow the field when
	 * n_offset == 0. The same question applies to m below.
	 */
	n = min_t(unsigned int, n, 1 << pll_info->n_bits);
	n = max_t(unsigned int, n, pll_info->n_offset);

	/* Work in MHz so the intermediate product stays small. */
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned int, m, 1 << pll_info->m_bits);
	m = max_t(unsigned int, m, pll_info->m_offset);

	*pm = m;
	*pn = n;
	*pod = od;
}
152 
153 static unsigned long
154 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
155 		 unsigned long rate, unsigned long parent_rate,
156 		 unsigned int *pm, unsigned int *pn, unsigned int *pod)
157 {
158 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
159 	unsigned int m, n, od;
160 
161 	if (pll_info->calc_m_n_od)
162 		(*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
163 	else
164 		ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);
165 
166 	if (pm)
167 		*pm = m;
168 	if (pn)
169 		*pn = n;
170 	if (pod)
171 		*pod = od;
172 
173 	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
174 		n * od);
175 }
176 
177 static long
178 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
179 		       unsigned long *prate)
180 {
181 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
182 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
183 
184 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
185 }
186 
/*
 * ingenic_pll_check_stable() - wait for a PLL to lock
 * @cgu: the CGU the PLL belongs to
 * @pll_info: description of the PLL
 *
 * Polls the PLL's stable bit for up to 100ms. PLLs without a stable
 * bit (stable_bit < 0) are assumed to lock immediately.
 *
 * Return: 0 once stable, -ETIMEDOUT if the bit never came up.
 */
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	if (pll_info->stable_bit < 0)
		return 0;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}
199 
/*
 * ingenic_pll_set_rate() - program the PLL dividers for @req_rate
 *
 * Recomputes m/n/od for the requested rate, then rewrites the control
 * register fields under cgu->lock. If the achievable rate differs from
 * the request, the actual rate is logged but still programmed.
 *
 * Return: 0 on success, or a negative error code if an enabled PLL
 * fails to re-stabilise after the update.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* The register stores m - m_offset and n - n_offset. */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* od is written via its SoC-specific encoding table. */
	if (pll_info->od_bits > 0) {
		ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
		ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
	}

	writel(ctl, cgu->base + pll_info->reg);

	/* If the PLL is enabled, verify that it's stable */
	if (pll_info->enable_bit >= 0 && (ctl & BIT(pll_info->enable_bit)))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
243 
/*
 * ingenic_pll_enable() - enable a PLL
 *
 * Clears the bypass bit (when the PLL has one), sets the enable bit
 * and then waits for the PLL to report stability. PLLs without an
 * enable bit are treated as always-on.
 *
 * Return: 0 on success, negative error code if the PLL fails to lock.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	if (pll_info->enable_bit < 0)
		return 0;

	spin_lock_irqsave(&cgu->lock, flags);
	/* Take the PLL out of bypass so its output is actually used. */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* Hold the lock until the PLL has locked (or timed out). */
	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
277 
278 static void ingenic_pll_disable(struct clk_hw *hw)
279 {
280 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
281 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
282 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
283 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
284 	unsigned long flags;
285 	u32 ctl;
286 
287 	if (pll_info->enable_bit < 0)
288 		return;
289 
290 	spin_lock_irqsave(&cgu->lock, flags);
291 	ctl = readl(cgu->base + pll_info->reg);
292 
293 	ctl &= ~BIT(pll_info->enable_bit);
294 
295 	writel(ctl, cgu->base + pll_info->reg);
296 	spin_unlock_irqrestore(&cgu->lock, flags);
297 }
298 
299 static int ingenic_pll_is_enabled(struct clk_hw *hw)
300 {
301 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
302 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
303 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
304 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
305 	u32 ctl;
306 
307 	if (pll_info->enable_bit < 0)
308 		return true;
309 
310 	ctl = readl(cgu->base + pll_info->reg);
311 
312 	return !!(ctl & BIT(pll_info->enable_bit));
313 }
314 
/* clk_ops used for clocks of type CGU_CLK_PLL. */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
324 
325 /*
326  * Operations for all non-PLL clocks
327  */
328 
329 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
330 {
331 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
332 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
333 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
334 	u32 reg;
335 	u8 i, hw_idx, idx = 0;
336 
337 	if (clk_info->type & CGU_CLK_MUX) {
338 		reg = readl(cgu->base + clk_info->mux.reg);
339 		hw_idx = (reg >> clk_info->mux.shift) &
340 			 GENMASK(clk_info->mux.bits - 1, 0);
341 
342 		/*
343 		 * Convert the hardware index to the parent index by skipping
344 		 * over any -1's in the parents array.
345 		 */
346 		for (i = 0; i < hw_idx; i++) {
347 			if (clk_info->parents[i] != -1)
348 				idx++;
349 		}
350 	}
351 
352 	return idx;
353 }
354 
/*
 * ingenic_clk_set_parent() - select the parent of a mux clock
 * @hw: the clock whose parent should change
 * @idx: logical index of the new parent
 *
 * Return: 0 on success; -EINVAL when the clock is not a mux and a
 * non-zero parent index was requested.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* Non-mux clocks only have parent 0. */
	return idx ? -EINVAL : 0;
}
401 
/*
 * ingenic_clk_recalc_rate() - compute the output rate of a non-PLL clock
 *
 * Applies the clock's configurable or fixed divider to the parent
 * rate. Configurable dividers may be bypassed for specific parents
 * via div.bypass_mask, in which case the parent rate passes through.
 */
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			/*
			 * With a div_table the raw field indexes the table;
			 * otherwise the field encodes div/div.div - 1.
			 */
			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}
433 
434 static unsigned int
435 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
436 			unsigned int div)
437 {
438 	unsigned int i, best_i = 0, best = (unsigned int)-1;
439 
440 	for (i = 0; i < (1 << clk_info->div.bits)
441 				&& clk_info->div.div_table[i]; i++) {
442 		if (clk_info->div.div_table[i] >= div &&
443 		    clk_info->div.div_table[i] < best) {
444 			best = clk_info->div.div_table[i];
445 			best_i = i;
446 
447 			if (div == best)
448 				break;
449 		}
450 	}
451 
452 	return best_i;
453 }
454 
/*
 * ingenic_clk_calc_div() - pick the divider needed to reach req_rate
 * @hw: the clock being configured
 * @clk_info: description of the clock
 * @parent_rate: rate of the parent clock
 * @req_rate: requested output rate
 *
 * Return: the effective divider value after imposing the hardware's
 * constraints (divider table membership, allowed range and div.div
 * granularity). Returns 1 when the divider is bypassed for the
 * current parent.
 */
static unsigned
ingenic_clk_calc_div(struct clk_hw *hw,
		     const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;
	u8 parent;

	parent = ingenic_clk_get_parent(hw);
	if (clk_info->div.bypass_mask & BIT(parent))
		return 1;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* Impose hardware constraints */
	div = clamp_t(unsigned int, div, clk_info->div.div,
		      clk_info->div.div << clk_info->div.bits);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div = DIV_ROUND_UP(div, clk_info->div.div);
	div *= clk_info->div.div;

	return div;
}
490 
491 static long
492 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
493 		       unsigned long *parent_rate)
494 {
495 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
496 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
497 	unsigned int div = 1;
498 
499 	if (clk_info->type & CGU_CLK_DIV)
500 		div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
501 	else if (clk_info->type & CGU_CLK_FIXDIV)
502 		div = clk_info->fixdiv.div;
503 	else if (clk_hw_can_set_rate_parent(hw))
504 		*parent_rate = req_rate;
505 
506 	return DIV_ROUND_UP(*parent_rate, div);
507 }
508 
/*
 * ingenic_clk_check_stable() - wait for a divider change to take effect
 * @cgu: the CGU the clock belongs to
 * @clk_info: description of the clock
 *
 * Polls the divider register's busy bit for up to 100ms. Only valid
 * for clocks whose div.busy_bit is set; the caller must hold cgu->lock.
 *
 * Return: 0 once the busy bit clears, -ETIMEDOUT otherwise.
 */
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}
518 
/*
 * ingenic_clk_set_rate() - program a non-PLL clock's divider
 *
 * Only clocks with a configurable divider (CGU_CLK_DIV) can change
 * rate here; the divider must produce @req_rate exactly, otherwise
 * -EINVAL is returned. The register update follows the hardware
 * protocol: clear the stop bit, set the change-enable bit, write the
 * divider, then wait for the busy bit to clear — all under cgu->lock.
 *
 * Return: 0 on success, -EINVAL for unsupported requests, or the
 * error from waiting on the busy bit.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Refuse to program a rate we cannot hit exactly. */
		if (rate != req_rate)
			return -EINVAL;

		/* Convert the divider to its raw register encoding. */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
572 
573 static int ingenic_clk_enable(struct clk_hw *hw)
574 {
575 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
576 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
577 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
578 	unsigned long flags;
579 
580 	if (clk_info->type & CGU_CLK_GATE) {
581 		/* ungate the clock */
582 		spin_lock_irqsave(&cgu->lock, flags);
583 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
584 		spin_unlock_irqrestore(&cgu->lock, flags);
585 
586 		if (clk_info->gate.delay_us)
587 			udelay(clk_info->gate.delay_us);
588 	}
589 
590 	return 0;
591 }
592 
593 static void ingenic_clk_disable(struct clk_hw *hw)
594 {
595 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
596 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
597 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
598 	unsigned long flags;
599 
600 	if (clk_info->type & CGU_CLK_GATE) {
601 		/* gate the clock */
602 		spin_lock_irqsave(&cgu->lock, flags);
603 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
604 		spin_unlock_irqrestore(&cgu->lock, flags);
605 	}
606 }
607 
608 static int ingenic_clk_is_enabled(struct clk_hw *hw)
609 {
610 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
611 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
612 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
613 	int enabled = 1;
614 
615 	if (clk_info->type & CGU_CLK_GATE)
616 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
617 
618 	return enabled;
619 }
620 
/* clk_ops used for all non-PLL, non-custom CGU clocks. */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
633 
634 /*
635  * Setup functions.
636  */
637 
/*
 * ingenic_register_clock() - register one clock with the clock framework
 * @cgu: the CGU the clock belongs to
 * @idx: index of the clock within cgu->clock_info
 *
 * External (CGU_CLK_EXT) clocks are obtained from the device tree;
 * everything else is registered with clk_ops chosen from the
 * capability bits in clk_info->type. Parents are resolved from
 * already-registered clocks, so clocks must be registered in
 * dependency order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	/* External clocks come from the device tree, not from the CGU. */
	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = clk_info->flags;
	clk_init.parent_names = parent_names;

	/* caps tracks the capability bits not yet handled below. */
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	/* Collect the names of all (connected) possible parents. */
	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			/* -1 marks an unconnected mux input. */
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	/* Pick the clk_ops; custom and PLL types must stand alone. */
	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		/* Glitch-prone muxes may only switch while gated. */
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	/* Any remaining bits form an unsupported type combination. */
	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
780 
781 struct ingenic_cgu *
782 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
783 		unsigned num_clocks, struct device_node *np)
784 {
785 	struct ingenic_cgu *cgu;
786 
787 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
788 	if (!cgu)
789 		goto err_out;
790 
791 	cgu->base = of_iomap(np, 0);
792 	if (!cgu->base) {
793 		pr_err("%s: failed to map CGU registers\n", __func__);
794 		goto err_out_free;
795 	}
796 
797 	cgu->np = np;
798 	cgu->clock_info = clock_info;
799 	cgu->clocks.clk_num = num_clocks;
800 
801 	spin_lock_init(&cgu->lock);
802 
803 	return cgu;
804 
805 err_out_free:
806 	kfree(cgu);
807 err_out:
808 	return NULL;
809 }
810 
/*
 * ingenic_cgu_register_clocks() - register all of a CGU's clocks
 * @cgu: the CGU whose clocks should be registered
 *
 * Registers every clock described by cgu->clock_info and exposes the
 * set as a device-tree clock provider. On failure, all previously
 * registered clocks are released again.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* Slots never reached are still NULL thanks to kcalloc. */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		/* External clocks were obtained with clk_get, not registered. */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
849