xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision 21534fe3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Ingenic SoC CGU driver
4  *
5  * Copyright (c) 2013-2015 Imagination Technologies
6  * Author: Paul Burton <paul.burton@mips.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
22 
23 #include "cgu.h"
24 
#define MHZ (1000 * 1000)	/* one megahertz, expressed in Hz */
26 
27 static inline const struct ingenic_cgu_clk_info *
28 to_clk_info(struct ingenic_clk *clk)
29 {
30 	return &clk->cgu->clock_info[clk->idx];
31 }
32 
33 /**
34  * ingenic_cgu_gate_get() - get the value of clock gate register bit
35  * @cgu: reference to the CGU whose registers should be read
36  * @info: info struct describing the gate bit
37  *
38  * Retrieves the state of the clock gate bit described by info. The
39  * caller must hold cgu->lock.
40  *
41  * Return: true if the gate bit is set, else false.
42  */
43 static inline bool
44 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
45 		     const struct ingenic_cgu_gate_info *info)
46 {
47 	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
48 		^ info->clear_to_gate;
49 }
50 
51 /**
52  * ingenic_cgu_gate_set() - set the value of clock gate register bit
53  * @cgu: reference to the CGU whose registers should be modified
54  * @info: info struct describing the gate bit
55  * @val: non-zero to gate a clock, otherwise zero
56  *
57  * Sets the given gate bit in order to gate or ungate a clock.
58  *
59  * The caller must hold cgu->lock.
60  */
61 static inline void
62 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
63 		     const struct ingenic_cgu_gate_info *info, bool val)
64 {
65 	u32 clkgr = readl(cgu->base + info->reg);
66 
67 	if (val ^ info->clear_to_gate)
68 		clkgr |= BIT(info->bit);
69 	else
70 		clkgr &= ~BIT(info->bit);
71 
72 	writel(clkgr, cgu->base + info->reg);
73 }
74 
75 /*
76  * PLL operations
77  */
78 
/*
 * ingenic_pll_recalc_rate() - compute the PLL's output rate from hardware.
 *
 * Decodes the m (multiplier), n (pre-divider) and od (post-divider) fields
 * from the PLL's control register and derives the output rate from the
 * parent rate. If the PLL has a bypass bit and it is set, the parent rate
 * is passed through unchanged.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* m and n are stored in the register minus their per-SoC offsets. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	/* od is stored hardware-encoded; translated via od_encoding below. */
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	ctl = readl(cgu->base + pll_info->bypass_reg);

	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));

	if (bypass)
		return parent_rate;

	/* Map the encoded od value back to its index; it must be present. */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
120 
121 static unsigned long
122 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
123 		 unsigned long rate, unsigned long parent_rate,
124 		 unsigned *pm, unsigned *pn, unsigned *pod)
125 {
126 	const struct ingenic_cgu_pll_info *pll_info;
127 	unsigned m, n, od;
128 
129 	pll_info = &clk_info->pll;
130 	od = 1;
131 
132 	/*
133 	 * The frequency after the input divider must be between 10 and 50 MHz.
134 	 * The highest divider yields the best resolution.
135 	 */
136 	n = parent_rate / (10 * MHZ);
137 	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
138 	n = max_t(unsigned, n, pll_info->n_offset);
139 
140 	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
141 	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
142 	m = max_t(unsigned, m, pll_info->m_offset);
143 
144 	if (pm)
145 		*pm = m;
146 	if (pn)
147 		*pn = n;
148 	if (pod)
149 		*pod = od;
150 
151 	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
152 		n * od);
153 }
154 
155 static long
156 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
157 		       unsigned long *prate)
158 {
159 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
160 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
161 
162 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
163 }
164 
165 static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
166 					   const struct ingenic_cgu_pll_info *pll_info)
167 {
168 	u32 ctl;
169 
170 	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
171 				  ctl & BIT(pll_info->stable_bit),
172 				  0, 100 * USEC_PER_MSEC);
173 }
174 
/*
 * ingenic_pll_set_rate() - program the PLL's m/n/od fields for @req_rate.
 *
 * Computes the closest achievable configuration via ingenic_pll_calc(),
 * logs when it differs from the request, then rewrites the three register
 * fields under cgu->lock.
 *
 * Return: 0 (the hardware write itself is not verified here).
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* m and n are written with their per-SoC offsets subtracted. */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* od is written in its hardware-encoded form. */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return 0;
}
210 
/*
 * ingenic_pll_enable() - enable the PLL and wait for it to stabilise.
 *
 * Under cgu->lock: clears the bypass bit first, then sets the enable bit,
 * and finally polls the stable bit via ingenic_pll_check_stable().
 *
 * Return: 0 on success, or the poll-timeout error if the PLL never locks.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);

	/* Take the PLL out of bypass... */
	ctl = readl(cgu->base + pll_info->bypass_reg);

	ctl &= ~BIT(pll_info->bypass_bit);

	writel(ctl, cgu->base + pll_info->bypass_reg);

	/* ...then switch it on. */
	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
239 
240 static void ingenic_pll_disable(struct clk_hw *hw)
241 {
242 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
243 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
244 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
245 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
246 	unsigned long flags;
247 	u32 ctl;
248 
249 	spin_lock_irqsave(&cgu->lock, flags);
250 	ctl = readl(cgu->base + pll_info->reg);
251 
252 	ctl &= ~BIT(pll_info->enable_bit);
253 
254 	writel(ctl, cgu->base + pll_info->reg);
255 	spin_unlock_irqrestore(&cgu->lock, flags);
256 }
257 
258 static int ingenic_pll_is_enabled(struct clk_hw *hw)
259 {
260 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
261 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
262 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
263 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
264 	u32 ctl;
265 
266 	ctl = readl(cgu->base + pll_info->reg);
267 
268 	return !!(ctl & BIT(pll_info->enable_bit));
269 }
270 
/* clk_ops used for CGU_CLK_PLL clocks. */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
280 
281 /*
282  * Operations for all non-PLL clocks
283  */
284 
285 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
286 {
287 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
288 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
289 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
290 	u32 reg;
291 	u8 i, hw_idx, idx = 0;
292 
293 	if (clk_info->type & CGU_CLK_MUX) {
294 		reg = readl(cgu->base + clk_info->mux.reg);
295 		hw_idx = (reg >> clk_info->mux.shift) &
296 			 GENMASK(clk_info->mux.bits - 1, 0);
297 
298 		/*
299 		 * Convert the hardware index to the parent index by skipping
300 		 * over any -1's in the parents array.
301 		 */
302 		for (i = 0; i < hw_idx; i++) {
303 			if (clk_info->parents[i] != -1)
304 				idx++;
305 		}
306 	}
307 
308 	return idx;
309 }
310 
311 static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
312 {
313 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
314 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
315 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
316 	unsigned long flags;
317 	u8 curr_idx, hw_idx, num_poss;
318 	u32 reg, mask;
319 
320 	if (clk_info->type & CGU_CLK_MUX) {
321 		/*
322 		 * Convert the parent index to the hardware index by adding
323 		 * 1 for any -1 in the parents array preceding the given
324 		 * index. That is, we want the index of idx'th entry in
325 		 * clk_info->parents which does not equal -1.
326 		 */
327 		hw_idx = curr_idx = 0;
328 		num_poss = 1 << clk_info->mux.bits;
329 		for (; hw_idx < num_poss; hw_idx++) {
330 			if (clk_info->parents[hw_idx] == -1)
331 				continue;
332 			if (curr_idx == idx)
333 				break;
334 			curr_idx++;
335 		}
336 
337 		/* idx should always be a valid parent */
338 		BUG_ON(curr_idx != idx);
339 
340 		mask = GENMASK(clk_info->mux.bits - 1, 0);
341 		mask <<= clk_info->mux.shift;
342 
343 		spin_lock_irqsave(&cgu->lock, flags);
344 
345 		/* write the register */
346 		reg = readl(cgu->base + clk_info->mux.reg);
347 		reg &= ~mask;
348 		reg |= hw_idx << clk_info->mux.shift;
349 		writel(reg, cgu->base + clk_info->mux.reg);
350 
351 		spin_unlock_irqrestore(&cgu->lock, flags);
352 		return 0;
353 	}
354 
355 	return idx ? -EINVAL : 0;
356 }
357 
358 static unsigned long
359 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
360 {
361 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
362 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
363 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
364 	unsigned long rate = parent_rate;
365 	u32 div_reg, div;
366 
367 	if (clk_info->type & CGU_CLK_DIV) {
368 		div_reg = readl(cgu->base + clk_info->div.reg);
369 		div = (div_reg >> clk_info->div.shift) &
370 		      GENMASK(clk_info->div.bits - 1, 0);
371 
372 		if (clk_info->div.div_table)
373 			div = clk_info->div.div_table[div];
374 		else
375 			div = (div + 1) * clk_info->div.div;
376 
377 		rate /= div;
378 	} else if (clk_info->type & CGU_CLK_FIXDIV) {
379 		rate /= clk_info->fixdiv.div;
380 	}
381 
382 	return rate;
383 }
384 
385 static unsigned int
386 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
387 			unsigned int div)
388 {
389 	unsigned int i;
390 
391 	for (i = 0; i < (1 << clk_info->div.bits)
392 				&& clk_info->div.div_table[i]; i++) {
393 		if (clk_info->div.div_table[i] >= div)
394 			return i;
395 	}
396 
397 	return i - 1;
398 }
399 
400 static unsigned
401 ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
402 		     unsigned long parent_rate, unsigned long req_rate)
403 {
404 	unsigned int div, hw_div;
405 
406 	/* calculate the divide */
407 	div = DIV_ROUND_UP(parent_rate, req_rate);
408 
409 	if (clk_info->div.div_table) {
410 		hw_div = ingenic_clk_calc_hw_div(clk_info, div);
411 
412 		return clk_info->div.div_table[hw_div];
413 	}
414 
415 	/* Impose hardware constraints */
416 	div = min_t(unsigned, div, 1 << clk_info->div.bits);
417 	div = max_t(unsigned, div, 1);
418 
419 	/*
420 	 * If the divider value itself must be divided before being written to
421 	 * the divider register, we must ensure we don't have any bits set that
422 	 * would be lost as a result of doing so.
423 	 */
424 	div /= clk_info->div.div;
425 	div *= clk_info->div.div;
426 
427 	return div;
428 }
429 
430 static long
431 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
432 		       unsigned long *parent_rate)
433 {
434 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
435 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
436 	unsigned int div = 1;
437 
438 	if (clk_info->type & CGU_CLK_DIV)
439 		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
440 	else if (clk_info->type & CGU_CLK_FIXDIV)
441 		div = clk_info->fixdiv.div;
442 
443 	return DIV_ROUND_UP(*parent_rate, div);
444 }
445 
446 static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
447 					   const struct ingenic_cgu_clk_info *clk_info)
448 {
449 	u32 reg;
450 
451 	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
452 				  !(reg & BIT(clk_info->div.busy_bit)),
453 				  0, 100 * USEC_PER_MSEC);
454 }
455 
/*
 * ingenic_clk_set_rate() - program a divider clock for @req_rate.
 *
 * Only divider-capable (CGU_CLK_DIV) clocks can set a rate here, and the
 * request must be exactly achievable with the computed divider, otherwise
 * -EINVAL is returned. The divider field plus any stop/change-enable bits
 * are rewritten under cgu->lock; if the hardware has a busy bit we wait
 * for the change to take effect before returning.
 *
 * Return: 0 on success, -EINVAL for unachievable rates or non-divider
 * clocks, or a timeout error from ingenic_clk_check_stable().
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		if (rate != req_rate)
			return -EINVAL;

		/* Translate the divider back into its register encoding. */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
509 
510 static int ingenic_clk_enable(struct clk_hw *hw)
511 {
512 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
513 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
514 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
515 	unsigned long flags;
516 
517 	if (clk_info->type & CGU_CLK_GATE) {
518 		/* ungate the clock */
519 		spin_lock_irqsave(&cgu->lock, flags);
520 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
521 		spin_unlock_irqrestore(&cgu->lock, flags);
522 
523 		if (clk_info->gate.delay_us)
524 			udelay(clk_info->gate.delay_us);
525 	}
526 
527 	return 0;
528 }
529 
530 static void ingenic_clk_disable(struct clk_hw *hw)
531 {
532 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
533 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
534 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
535 	unsigned long flags;
536 
537 	if (clk_info->type & CGU_CLK_GATE) {
538 		/* gate the clock */
539 		spin_lock_irqsave(&cgu->lock, flags);
540 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
541 		spin_unlock_irqrestore(&cgu->lock, flags);
542 	}
543 }
544 
545 static int ingenic_clk_is_enabled(struct clk_hw *hw)
546 {
547 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
548 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
549 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
550 	int enabled = 1;
551 
552 	if (clk_info->type & CGU_CLK_GATE)
553 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
554 
555 	return enabled;
556 }
557 
/* clk_ops shared by all non-PLL, non-custom clocks (mux/div/gate/fixdiv). */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
570 
571 /*
572  * Setup functions.
573  */
574 
/*
 * ingenic_register_clock() - register one clock described in the CGU table.
 * @cgu: the CGU whose clock_info table describes the clock
 * @idx: index of the clock within that table
 *
 * CGU_CLK_EXT clocks are obtained from the device tree rather than
 * registered here. Every other type gets a freshly allocated struct
 * ingenic_clk registered with ops chosen from its type bits: a custom ops
 * table, the PLL ops, or the generic mux/div/gate ops. Type bits are
 * consumed from @caps as they are handled; any bits left over at the end
 * indicate an invalid type combination.
 *
 * Return: 0 on success, -errno on failure (the allocation is freed on
 * every error path).
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	/* External clocks come from DT and are only wrapped in a clkdev. */
	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	/* clk_init only needs to live until clk_register() returns. */
	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		/* Collect parent names, skipping -1 (unused) entries. */
		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	/* Custom and PLL types are exclusive of all other type bits. */
	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;
		clk_init.flags |= CLK_SET_RATE_GATE;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	/* Any type bits not consumed above are unknown. */
	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
718 
719 struct ingenic_cgu *
720 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
721 		unsigned num_clocks, struct device_node *np)
722 {
723 	struct ingenic_cgu *cgu;
724 
725 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
726 	if (!cgu)
727 		goto err_out;
728 
729 	cgu->base = of_iomap(np, 0);
730 	if (!cgu->base) {
731 		pr_err("%s: failed to map CGU registers\n", __func__);
732 		goto err_out_free;
733 	}
734 
735 	cgu->np = np;
736 	cgu->clock_info = clock_info;
737 	cgu->clocks.clk_num = num_clocks;
738 
739 	spin_lock_init(&cgu->lock);
740 
741 	return cgu;
742 
743 err_out_free:
744 	kfree(cgu);
745 err_out:
746 	return NULL;
747 }
748 
/*
 * ingenic_cgu_register_clocks() - register all clocks of a CGU.
 * @cgu: CGU previously created with ingenic_cgu_new()
 *
 * Allocates the clk array, registers each clock described in
 * cgu->clock_info in table order, then exposes the set as an of_clk
 * provider. On failure, every clock registered so far is released
 * (clk_put() for external clocks, clk_unregister() otherwise) and the
 * array is freed.
 *
 * Return: 0 on success, -errno on failure.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		/* entries past the failure point are still NULL (kcalloc) */
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
787