xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision 0ea33321)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Ingenic SoC CGU driver
4  *
5  * Copyright (c) 2013-2015 Imagination Technologies
6  * Author: Paul Burton <paul.burton@mips.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
22 
23 #include "cgu.h"
24 
25 #define MHZ (1000 * 1000)
26 
27 static inline const struct ingenic_cgu_clk_info *
28 to_clk_info(struct ingenic_clk *clk)
29 {
30 	return &clk->cgu->clock_info[clk->idx];
31 }
32 
33 /**
34  * ingenic_cgu_gate_get() - get the value of clock gate register bit
35  * @cgu: reference to the CGU whose registers should be read
36  * @info: info struct describing the gate bit
37  *
38  * Retrieves the state of the clock gate bit described by info. The
39  * caller must hold cgu->lock.
40  *
41  * Return: true if the gate bit is set, else false.
42  */
43 static inline bool
44 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
45 		     const struct ingenic_cgu_gate_info *info)
46 {
47 	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
48 		^ info->clear_to_gate;
49 }
50 
51 /**
52  * ingenic_cgu_gate_set() - set the value of clock gate register bit
53  * @cgu: reference to the CGU whose registers should be modified
54  * @info: info struct describing the gate bit
55  * @val: non-zero to gate a clock, otherwise zero
56  *
57  * Sets the given gate bit in order to gate or ungate a clock.
58  *
59  * The caller must hold cgu->lock.
60  */
61 static inline void
62 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
63 		     const struct ingenic_cgu_gate_info *info, bool val)
64 {
65 	u32 clkgr = readl(cgu->base + info->reg);
66 
67 	if (val ^ info->clear_to_gate)
68 		clkgr |= BIT(info->bit);
69 	else
70 		clkgr &= ~BIT(info->bit);
71 
72 	writel(clkgr, cgu->base + info->reg);
73 }
74 
75 /*
76  * PLL operations
77  */
78 
/*
 * Compute the current PLL output rate from its control register.
 *
 * Decodes the M (multiplier), N (divider) and OD (output divider) fields
 * from the PLL register, then returns:
 *   parent_rate * M * rate_multiplier / (N * OD)
 * unless the PLL is bypassed, in which case the parent rate passes through.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	/* This op is only ever attached to pure PLL clocks */
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* Register fields hold (value - offset); add the offset back */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	/* OD is stored encoded; decoded via od_encoding[] below */
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	ctl = readl(cgu->base + pll_info->bypass_reg);

	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));

	/* When bypassed the PLL simply forwards its input clock */
	if (bypass)
		return parent_rate;

	/* Map the encoded OD back to its index; od_encoding[i] encodes OD = i + 1 */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	/* An unknown encoding means the table is wrong - fail loudly */
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
120 
121 static unsigned long
122 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
123 		 unsigned long rate, unsigned long parent_rate,
124 		 unsigned *pm, unsigned *pn, unsigned *pod)
125 {
126 	const struct ingenic_cgu_pll_info *pll_info;
127 	unsigned m, n, od;
128 
129 	pll_info = &clk_info->pll;
130 	od = 1;
131 
132 	/*
133 	 * The frequency after the input divider must be between 10 and 50 MHz.
134 	 * The highest divider yields the best resolution.
135 	 */
136 	n = parent_rate / (10 * MHZ);
137 	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
138 	n = max_t(unsigned, n, pll_info->n_offset);
139 
140 	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
141 	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
142 	m = max_t(unsigned, m, pll_info->m_offset);
143 
144 	if (pm)
145 		*pm = m;
146 	if (pn)
147 		*pn = n;
148 	if (pod)
149 		*pod = od;
150 
151 	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
152 		n * od);
153 }
154 
155 static long
156 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
157 		       unsigned long *prate)
158 {
159 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
160 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
161 
162 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
163 }
164 
/*
 * Poll the PLL register until the stable bit is set.
 *
 * Return: 0 once the PLL reports stable, or -ETIMEDOUT if it fails to
 * lock within 100ms.
 */
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}
174 
/*
 * Program the PLL's M/N/OD fields to achieve (approximately) @req_rate.
 *
 * If the exact rate is unachievable the nearest achievable rate is
 * programmed and logged. When the PLL is currently enabled, waits for it
 * to re-lock before returning.
 *
 * Return: 0 on success, or a negative error if the PLL fails to stabilise.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	/* All M/N/OD fields are updated in a single register write */
	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* Fields store (value - offset) */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* OD is written encoded; od - 1 indexes the encoding table */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);

	/* If the PLL is enabled, verify that it's stable */
	if (ctl & BIT(pll_info->enable_bit))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
216 
/*
 * Enable the PLL: take it out of bypass, set its enable bit, then wait
 * for it to lock.
 *
 * Return: 0 on success, or a negative error if the PLL fails to stabilise.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->bypass_reg);

	/* Disable bypass first so the PLL output is used once it locks */
	ctl &= ~BIT(pll_info->bypass_bit);

	writel(ctl, cgu->base + pll_info->bypass_reg);

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* Block until the PLL reports stable (or times out) */
	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
245 
246 static void ingenic_pll_disable(struct clk_hw *hw)
247 {
248 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
249 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
250 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
251 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
252 	unsigned long flags;
253 	u32 ctl;
254 
255 	spin_lock_irqsave(&cgu->lock, flags);
256 	ctl = readl(cgu->base + pll_info->reg);
257 
258 	ctl &= ~BIT(pll_info->enable_bit);
259 
260 	writel(ctl, cgu->base + pll_info->reg);
261 	spin_unlock_irqrestore(&cgu->lock, flags);
262 }
263 
264 static int ingenic_pll_is_enabled(struct clk_hw *hw)
265 {
266 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
267 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
268 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
269 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
270 	u32 ctl;
271 
272 	ctl = readl(cgu->base + pll_info->reg);
273 
274 	return !!(ctl & BIT(pll_info->enable_bit));
275 }
276 
/* clk_ops for clocks of type CGU_CLK_PLL */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
286 
287 /*
288  * Operations for all non-PLL clocks
289  */
290 
291 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
292 {
293 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
294 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
295 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
296 	u32 reg;
297 	u8 i, hw_idx, idx = 0;
298 
299 	if (clk_info->type & CGU_CLK_MUX) {
300 		reg = readl(cgu->base + clk_info->mux.reg);
301 		hw_idx = (reg >> clk_info->mux.shift) &
302 			 GENMASK(clk_info->mux.bits - 1, 0);
303 
304 		/*
305 		 * Convert the hardware index to the parent index by skipping
306 		 * over any -1's in the parents array.
307 		 */
308 		for (i = 0; i < hw_idx; i++) {
309 			if (clk_info->parents[i] != -1)
310 				idx++;
311 		}
312 	}
313 
314 	return idx;
315 }
316 
/*
 * Select parent @idx for a mux clock by translating the framework parent
 * index into the hardware mux field value and writing it.
 *
 * Return: 0 on success, -EINVAL if a non-mux clock is asked for a parent
 * other than 0.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		/* Serialise the read-modify-write against other CGU accesses */
		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* Non-mux clocks have exactly one parent: index 0 */
	return idx ? -EINVAL : 0;
}
363 
364 static unsigned long
365 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
366 {
367 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
368 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
369 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
370 	unsigned long rate = parent_rate;
371 	u32 div_reg, div;
372 
373 	if (clk_info->type & CGU_CLK_DIV) {
374 		div_reg = readl(cgu->base + clk_info->div.reg);
375 		div = (div_reg >> clk_info->div.shift) &
376 		      GENMASK(clk_info->div.bits - 1, 0);
377 
378 		if (clk_info->div.div_table)
379 			div = clk_info->div.div_table[div];
380 		else
381 			div = (div + 1) * clk_info->div.div;
382 
383 		rate /= div;
384 	} else if (clk_info->type & CGU_CLK_FIXDIV) {
385 		rate /= clk_info->fixdiv.div;
386 	}
387 
388 	return rate;
389 }
390 
391 static unsigned int
392 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
393 			unsigned int div)
394 {
395 	unsigned int i, best_i = 0, best = (unsigned int)-1;
396 
397 	for (i = 0; i < (1 << clk_info->div.bits)
398 				&& clk_info->div.div_table[i]; i++) {
399 		if (clk_info->div.div_table[i] >= div &&
400 		    clk_info->div.div_table[i] < best) {
401 			best = clk_info->div.div_table[i];
402 			best_i = i;
403 
404 			if (div == best)
405 				break;
406 		}
407 	}
408 
409 	return best_i;
410 }
411 
412 static unsigned
413 ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
414 		     unsigned long parent_rate, unsigned long req_rate)
415 {
416 	unsigned int div, hw_div;
417 
418 	/* calculate the divide */
419 	div = DIV_ROUND_UP(parent_rate, req_rate);
420 
421 	if (clk_info->div.div_table) {
422 		hw_div = ingenic_clk_calc_hw_div(clk_info, div);
423 
424 		return clk_info->div.div_table[hw_div];
425 	}
426 
427 	/* Impose hardware constraints */
428 	div = min_t(unsigned, div, 1 << clk_info->div.bits);
429 	div = max_t(unsigned, div, 1);
430 
431 	/*
432 	 * If the divider value itself must be divided before being written to
433 	 * the divider register, we must ensure we don't have any bits set that
434 	 * would be lost as a result of doing so.
435 	 */
436 	div /= clk_info->div.div;
437 	div *= clk_info->div.div;
438 
439 	return div;
440 }
441 
442 static long
443 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
444 		       unsigned long *parent_rate)
445 {
446 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
447 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
448 	unsigned int div = 1;
449 
450 	if (clk_info->type & CGU_CLK_DIV)
451 		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
452 	else if (clk_info->type & CGU_CLK_FIXDIV)
453 		div = clk_info->fixdiv.div;
454 	else if (clk_hw_can_set_rate_parent(hw))
455 		*parent_rate = req_rate;
456 
457 	return DIV_ROUND_UP(*parent_rate, div);
458 }
459 
/*
 * Poll the divider register until its busy bit clears, indicating the
 * divider change has taken effect.
 *
 * Return: 0 once the busy bit clears, or -ETIMEDOUT after 100ms.
 */
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}
469 
/*
 * Program the clock's divider to achieve exactly @req_rate.
 *
 * Only clocks with a programmable divider (CGU_CLK_DIV) can change rate,
 * and only to rates exactly reachable by a supported divider value.
 *
 * Return: 0 on success, -EINVAL if the rate is unreachable, or a negative
 * error if the divider change does not take effect in time.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Refuse inexact rates rather than silently approximating */
		if (rate != req_rate)
			return -EINVAL;

		/* Convert the divider to its register encoding */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
523 
524 static int ingenic_clk_enable(struct clk_hw *hw)
525 {
526 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
527 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
528 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
529 	unsigned long flags;
530 
531 	if (clk_info->type & CGU_CLK_GATE) {
532 		/* ungate the clock */
533 		spin_lock_irqsave(&cgu->lock, flags);
534 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
535 		spin_unlock_irqrestore(&cgu->lock, flags);
536 
537 		if (clk_info->gate.delay_us)
538 			udelay(clk_info->gate.delay_us);
539 	}
540 
541 	return 0;
542 }
543 
544 static void ingenic_clk_disable(struct clk_hw *hw)
545 {
546 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
547 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
548 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
549 	unsigned long flags;
550 
551 	if (clk_info->type & CGU_CLK_GATE) {
552 		/* gate the clock */
553 		spin_lock_irqsave(&cgu->lock, flags);
554 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
555 		spin_unlock_irqrestore(&cgu->lock, flags);
556 	}
557 }
558 
559 static int ingenic_clk_is_enabled(struct clk_hw *hw)
560 {
561 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
562 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
563 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
564 	int enabled = 1;
565 
566 	if (clk_info->type & CGU_CLK_GATE)
567 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
568 
569 	return enabled;
570 }
571 
/* clk_ops for all non-PLL, non-custom clocks (mux/div/fixdiv/gate) */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
584 
585 /*
586  * Setup functions.
587  */
588 
/*
 * ingenic_register_clock() - register one clock with the clk framework
 * @cgu: the CGU the clock belongs to
 * @idx: index of the clock within cgu->clock_info
 *
 * External clocks (CGU_CLK_EXT) are resolved from the device tree; every
 * other clock type gets an ingenic_clk allocated and registered with ops
 * selected from the type flags (custom, PLL, or the generic ops).
 *
 * Return: 0 on success, otherwise a negative error code.
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	/* parent_names must be able to hold every possible parent */
	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	/* External clocks are provided by the DT, not registered by us */
	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	/* clk_init only needs to live until clk_register() copies from it */
	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	/* caps tracks which type flags still need handling below */
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		/* Collect parent names, skipping -1 placeholder entries */
		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		/* Non-mux clocks have exactly one parent: parents[0] */
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	/* Custom, PLL, and generic ops are mutually exclusive */
	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	/* Any flag still set at this point is one we don't know about */
	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	/*
	 * NOTE(review): if clk_register_clkdev() fails here, the error path
	 * frees ingenic_clk while its hw is still registered with the clk
	 * framework - looks like a potential use-after-free; confirm whether
	 * a clk_unregister() is needed before the kfree().
	 */
	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
731 
732 struct ingenic_cgu *
733 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
734 		unsigned num_clocks, struct device_node *np)
735 {
736 	struct ingenic_cgu *cgu;
737 
738 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
739 	if (!cgu)
740 		goto err_out;
741 
742 	cgu->base = of_iomap(np, 0);
743 	if (!cgu->base) {
744 		pr_err("%s: failed to map CGU registers\n", __func__);
745 		goto err_out_free;
746 	}
747 
748 	cgu->np = np;
749 	cgu->clock_info = clock_info;
750 	cgu->clocks.clk_num = num_clocks;
751 
752 	spin_lock_init(&cgu->lock);
753 
754 	return cgu;
755 
756 err_out_free:
757 	kfree(cgu);
758 err_out:
759 	return NULL;
760 }
761 
/*
 * Register every clock described by the CGU's clock table and expose them
 * as a DT clock provider.
 *
 * On failure, all clocks registered so far are unwound (external clocks
 * are put, locally-registered ones unregistered).
 *
 * Return: 0 on success, otherwise a negative error code.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	/* kcalloc zeroes the array, so unregistered slots stay NULL */
	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* Scan the whole array; NULL entries were never registered */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		/* External clocks were obtained via of_clk_get_by_name() */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
800