xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision 15e3ae36)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Ingenic SoC CGU driver
4  *
5  * Copyright (c) 2013-2015 Imagination Technologies
6  * Author: Paul Burton <paul.burton@mips.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/math64.h>
16 #include <linux/of.h>
17 #include <linux/of_address.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 #include "cgu.h"
21 
22 #define MHZ (1000 * 1000)
23 
24 /**
25  * ingenic_cgu_gate_get() - get the value of clock gate register bit
26  * @cgu: reference to the CGU whose registers should be read
27  * @info: info struct describing the gate bit
28  *
29  * Retrieves the state of the clock gate bit described by info. The
30  * caller must hold cgu->lock.
31  *
32  * Return: true if the gate bit is set, else false.
33  */
34 static inline bool
35 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
36 		     const struct ingenic_cgu_gate_info *info)
37 {
38 	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
39 		^ info->clear_to_gate;
40 }
41 
42 /**
43  * ingenic_cgu_gate_set() - set the value of clock gate register bit
44  * @cgu: reference to the CGU whose registers should be modified
45  * @info: info struct describing the gate bit
46  * @val: non-zero to gate a clock, otherwise zero
47  *
48  * Sets the given gate bit in order to gate or ungate a clock.
49  *
50  * The caller must hold cgu->lock.
51  */
52 static inline void
53 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
54 		     const struct ingenic_cgu_gate_info *info, bool val)
55 {
56 	u32 clkgr = readl(cgu->base + info->reg);
57 
58 	if (val ^ info->clear_to_gate)
59 		clkgr |= BIT(info->bit);
60 	else
61 		clkgr &= ~BIT(info->bit);
62 
63 	writel(clkgr, cgu->base + info->reg);
64 }
65 
66 /*
67  * PLL operations
68  */
69 
/*
 * Recalculate the PLL output rate from the control register:
 * rate = parent_rate * M / (N * OD). If the PLL has a bypass bit and it
 * is set, the parent rate is passed through unchanged.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* The M and N fields store (value - offset); add the offset back. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));

	if (bypass)
		return parent_rate;

	/* The OD field is encoded; find the divider the encoding maps to. */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	/* An unknown encoding means the od_encoding table is wrong. */
	BUG_ON(od == pll_info->od_max);
	od++;

	/* 64-bit multiply: parent_rate * m can overflow unsigned long. */
	return div_u64((u64)parent_rate * m, n * od);
}
108 
109 static unsigned long
110 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
111 		 unsigned long rate, unsigned long parent_rate,
112 		 unsigned *pm, unsigned *pn, unsigned *pod)
113 {
114 	const struct ingenic_cgu_pll_info *pll_info;
115 	unsigned m, n, od;
116 
117 	pll_info = &clk_info->pll;
118 	od = 1;
119 
120 	/*
121 	 * The frequency after the input divider must be between 10 and 50 MHz.
122 	 * The highest divider yields the best resolution.
123 	 */
124 	n = parent_rate / (10 * MHZ);
125 	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
126 	n = max_t(unsigned, n, pll_info->n_offset);
127 
128 	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
129 	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
130 	m = max_t(unsigned, m, pll_info->m_offset);
131 
132 	if (pm)
133 		*pm = m;
134 	if (pn)
135 		*pn = n;
136 	if (pod)
137 		*pod = od;
138 
139 	return div_u64((u64)parent_rate * m, n * od);
140 }
141 
142 static inline const struct ingenic_cgu_clk_info *to_clk_info(
143 		struct ingenic_clk *ingenic_clk)
144 {
145 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
146 	const struct ingenic_cgu_clk_info *clk_info;
147 
148 	clk_info = &cgu->clock_info[ingenic_clk->idx];
149 	BUG_ON(clk_info->type != CGU_CLK_PLL);
150 
151 	return clk_info;
152 }
153 
154 static long
155 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
156 		       unsigned long *prate)
157 {
158 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
159 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
160 
161 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
162 }
163 
/*
 * Program the PLL's M/N/OD register fields so the output is as close as
 * possible to req_rate. If the achievable rate differs from the request,
 * the discrepancy is logged. Always returns 0.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	/* Read-modify-write the control register under the CGU lock. */
	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* M and N fields store (value - offset). */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* OD is written via its hardware encoding, indexed by (od - 1). */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return 0;
}
199 
/*
 * Enable the PLL: clear the bypass bit, set the enable bit, then poll the
 * stable bit for up to ~100ms.
 *
 * Return: 0 on success, -EBUSY if the PLL failed to stabilise in time.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	const unsigned int timeout = 100;
	unsigned long flags;
	unsigned int i;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/*
	 * wait for the PLL to stabilise
	 *
	 * NOTE(review): this busy-waits with mdelay() while holding the
	 * spinlock with IRQs disabled, for up to 100ms in the worst case —
	 * confirm this latency is acceptable for the affected SoCs.
	 */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	/* i == timeout means the stable bit never came up. */
	if (i == timeout)
		return -EBUSY;

	return 0;
}
234 
235 static void ingenic_pll_disable(struct clk_hw *hw)
236 {
237 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
238 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
239 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
240 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
241 	unsigned long flags;
242 	u32 ctl;
243 
244 	spin_lock_irqsave(&cgu->lock, flags);
245 	ctl = readl(cgu->base + pll_info->reg);
246 
247 	ctl &= ~BIT(pll_info->enable_bit);
248 
249 	writel(ctl, cgu->base + pll_info->reg);
250 	spin_unlock_irqrestore(&cgu->lock, flags);
251 }
252 
253 static int ingenic_pll_is_enabled(struct clk_hw *hw)
254 {
255 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
256 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
257 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
258 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
259 	u32 ctl;
260 
261 	ctl = readl(cgu->base + pll_info->reg);
262 
263 	return !!(ctl & BIT(pll_info->enable_bit));
264 }
265 
/* clk_ops used for CGU_CLK_PLL clocks; selected in ingenic_register_clock(). */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
275 
276 /*
277  * Operations for all non-PLL clocks
278  */
279 
280 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
281 {
282 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
283 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
284 	const struct ingenic_cgu_clk_info *clk_info;
285 	u32 reg;
286 	u8 i, hw_idx, idx = 0;
287 
288 	clk_info = &cgu->clock_info[ingenic_clk->idx];
289 
290 	if (clk_info->type & CGU_CLK_MUX) {
291 		reg = readl(cgu->base + clk_info->mux.reg);
292 		hw_idx = (reg >> clk_info->mux.shift) &
293 			 GENMASK(clk_info->mux.bits - 1, 0);
294 
295 		/*
296 		 * Convert the hardware index to the parent index by skipping
297 		 * over any -1's in the parents array.
298 		 */
299 		for (i = 0; i < hw_idx; i++) {
300 			if (clk_info->parents[i] != -1)
301 				idx++;
302 		}
303 	}
304 
305 	return idx;
306 }
307 
/*
 * Switch a mux clock to the parent at framework index @idx.
 *
 * Return: 0 on success; -EINVAL if asked to re-parent a non-mux clock to
 * anything other than its only parent (index 0).
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		/* Read-modify-write of the mux field under the CGU lock. */
		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* Not a mux: only "setting" the sole parent (idx 0) is a no-op. */
	return idx ? -EINVAL : 0;
}
356 
357 static unsigned long
358 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
359 {
360 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
361 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
362 	const struct ingenic_cgu_clk_info *clk_info;
363 	unsigned long rate = parent_rate;
364 	u32 div_reg, div;
365 
366 	clk_info = &cgu->clock_info[ingenic_clk->idx];
367 
368 	if (clk_info->type & CGU_CLK_DIV) {
369 		div_reg = readl(cgu->base + clk_info->div.reg);
370 		div = (div_reg >> clk_info->div.shift) &
371 		      GENMASK(clk_info->div.bits - 1, 0);
372 
373 		if (clk_info->div.div_table)
374 			div = clk_info->div.div_table[div];
375 		else
376 			div = (div + 1) * clk_info->div.div;
377 
378 		rate /= div;
379 	} else if (clk_info->type & CGU_CLK_FIXDIV) {
380 		rate /= clk_info->fixdiv.div;
381 	}
382 
383 	return rate;
384 }
385 
386 static unsigned int
387 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
388 			unsigned int div)
389 {
390 	unsigned int i;
391 
392 	for (i = 0; i < (1 << clk_info->div.bits)
393 				&& clk_info->div.div_table[i]; i++) {
394 		if (clk_info->div.div_table[i] >= div)
395 			return i;
396 	}
397 
398 	return i - 1;
399 }
400 
/*
 * Compute the effective divider to apply to parent_rate to best reach
 * req_rate, honouring the hardware constraints (divider table if present,
 * otherwise the field width and the div.div granularity). Returns the
 * actual division applied, not the raw register encoding.
 */
static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		/* Table-based dividers: round up to the nearest table entry. */
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* Impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div /= clk_info->div.div;
	div *= clk_info->div.div;

	return div;
}
430 
431 static long
432 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
433 		       unsigned long *parent_rate)
434 {
435 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
436 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
437 	const struct ingenic_cgu_clk_info *clk_info;
438 	unsigned int div = 1;
439 
440 	clk_info = &cgu->clock_info[ingenic_clk->idx];
441 
442 	if (clk_info->type & CGU_CLK_DIV)
443 		div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
444 	else if (clk_info->type & CGU_CLK_FIXDIV)
445 		div = clk_info->fixdiv.div;
446 
447 	return DIV_ROUND_UP(*parent_rate, div);
448 }
449 
/*
 * Program the clock's divider so its rate equals req_rate exactly.
 *
 * Return: 0 on success; -EINVAL if the clock has no divider or the exact
 * rate is not achievable; -EBUSY if the hardware's busy bit did not clear
 * within the timeout after the update.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned int hw_div, div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Only exact matches are accepted. */
		if (rate != req_rate)
			return -EINVAL;

		/* Encode the divider: table index, or linear (div/step - 1). */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				/* NOTE(review): mdelay under spinlock, up to 100ms. */
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
514 
515 static int ingenic_clk_enable(struct clk_hw *hw)
516 {
517 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
518 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
519 	const struct ingenic_cgu_clk_info *clk_info;
520 	unsigned long flags;
521 
522 	clk_info = &cgu->clock_info[ingenic_clk->idx];
523 
524 	if (clk_info->type & CGU_CLK_GATE) {
525 		/* ungate the clock */
526 		spin_lock_irqsave(&cgu->lock, flags);
527 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
528 		spin_unlock_irqrestore(&cgu->lock, flags);
529 
530 		if (clk_info->gate.delay_us)
531 			udelay(clk_info->gate.delay_us);
532 	}
533 
534 	return 0;
535 }
536 
537 static void ingenic_clk_disable(struct clk_hw *hw)
538 {
539 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
540 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
541 	const struct ingenic_cgu_clk_info *clk_info;
542 	unsigned long flags;
543 
544 	clk_info = &cgu->clock_info[ingenic_clk->idx];
545 
546 	if (clk_info->type & CGU_CLK_GATE) {
547 		/* gate the clock */
548 		spin_lock_irqsave(&cgu->lock, flags);
549 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
550 		spin_unlock_irqrestore(&cgu->lock, flags);
551 	}
552 }
553 
554 static int ingenic_clk_is_enabled(struct clk_hw *hw)
555 {
556 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
557 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
558 	const struct ingenic_cgu_clk_info *clk_info;
559 	int enabled = 1;
560 
561 	clk_info = &cgu->clock_info[ingenic_clk->idx];
562 
563 	if (clk_info->type & CGU_CLK_GATE)
564 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
565 
566 	return enabled;
567 }
568 
/* clk_ops for all non-PLL, non-custom clocks (mux/div/gate/fixdiv). */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
581 
582 /*
583  * Setup functions.
584  */
585 
586 static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
587 {
588 	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
589 	struct clk_init_data clk_init;
590 	struct ingenic_clk *ingenic_clk = NULL;
591 	struct clk *clk, *parent;
592 	const char *parent_names[4];
593 	unsigned caps, i, num_possible;
594 	int err = -EINVAL;
595 
596 	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));
597 
598 	if (clk_info->type == CGU_CLK_EXT) {
599 		clk = of_clk_get_by_name(cgu->np, clk_info->name);
600 		if (IS_ERR(clk)) {
601 			pr_err("%s: no external clock '%s' provided\n",
602 			       __func__, clk_info->name);
603 			err = -ENODEV;
604 			goto out;
605 		}
606 		err = clk_register_clkdev(clk, clk_info->name, NULL);
607 		if (err) {
608 			clk_put(clk);
609 			goto out;
610 		}
611 		cgu->clocks.clks[idx] = clk;
612 		return 0;
613 	}
614 
615 	if (!clk_info->type) {
616 		pr_err("%s: no clock type specified for '%s'\n", __func__,
617 		       clk_info->name);
618 		goto out;
619 	}
620 
621 	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
622 	if (!ingenic_clk) {
623 		err = -ENOMEM;
624 		goto out;
625 	}
626 
627 	ingenic_clk->hw.init = &clk_init;
628 	ingenic_clk->cgu = cgu;
629 	ingenic_clk->idx = idx;
630 
631 	clk_init.name = clk_info->name;
632 	clk_init.flags = 0;
633 	clk_init.parent_names = parent_names;
634 
635 	caps = clk_info->type;
636 
637 	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
638 		clk_init.num_parents = 0;
639 
640 		if (caps & CGU_CLK_MUX)
641 			num_possible = 1 << clk_info->mux.bits;
642 		else
643 			num_possible = ARRAY_SIZE(clk_info->parents);
644 
645 		for (i = 0; i < num_possible; i++) {
646 			if (clk_info->parents[i] == -1)
647 				continue;
648 
649 			parent = cgu->clocks.clks[clk_info->parents[i]];
650 			parent_names[clk_init.num_parents] =
651 				__clk_get_name(parent);
652 			clk_init.num_parents++;
653 		}
654 
655 		BUG_ON(!clk_init.num_parents);
656 		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
657 	} else {
658 		BUG_ON(clk_info->parents[0] == -1);
659 		clk_init.num_parents = 1;
660 		parent = cgu->clocks.clks[clk_info->parents[0]];
661 		parent_names[0] = __clk_get_name(parent);
662 	}
663 
664 	if (caps & CGU_CLK_CUSTOM) {
665 		clk_init.ops = clk_info->custom.clk_ops;
666 
667 		caps &= ~CGU_CLK_CUSTOM;
668 
669 		if (caps) {
670 			pr_err("%s: custom clock may not be combined with type 0x%x\n",
671 			       __func__, caps);
672 			goto out;
673 		}
674 	} else if (caps & CGU_CLK_PLL) {
675 		clk_init.ops = &ingenic_pll_ops;
676 		clk_init.flags |= CLK_SET_RATE_GATE;
677 
678 		caps &= ~CGU_CLK_PLL;
679 
680 		if (caps) {
681 			pr_err("%s: PLL may not be combined with type 0x%x\n",
682 			       __func__, caps);
683 			goto out;
684 		}
685 	} else {
686 		clk_init.ops = &ingenic_clk_ops;
687 	}
688 
689 	/* nothing to do for gates or fixed dividers */
690 	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);
691 
692 	if (caps & CGU_CLK_MUX) {
693 		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
694 			clk_init.flags |= CLK_SET_PARENT_GATE;
695 
696 		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
697 	}
698 
699 	if (caps & CGU_CLK_DIV) {
700 		caps &= ~CGU_CLK_DIV;
701 	} else {
702 		/* pass rate changes to the parent clock */
703 		clk_init.flags |= CLK_SET_RATE_PARENT;
704 	}
705 
706 	if (caps) {
707 		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
708 		goto out;
709 	}
710 
711 	clk = clk_register(NULL, &ingenic_clk->hw);
712 	if (IS_ERR(clk)) {
713 		pr_err("%s: failed to register clock '%s'\n", __func__,
714 		       clk_info->name);
715 		err = PTR_ERR(clk);
716 		goto out;
717 	}
718 
719 	err = clk_register_clkdev(clk, clk_info->name, NULL);
720 	if (err)
721 		goto out;
722 
723 	cgu->clocks.clks[idx] = clk;
724 out:
725 	if (err)
726 		kfree(ingenic_clk);
727 	return err;
728 }
729 
730 struct ingenic_cgu *
731 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
732 		unsigned num_clocks, struct device_node *np)
733 {
734 	struct ingenic_cgu *cgu;
735 
736 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
737 	if (!cgu)
738 		goto err_out;
739 
740 	cgu->base = of_iomap(np, 0);
741 	if (!cgu->base) {
742 		pr_err("%s: failed to map CGU registers\n", __func__);
743 		goto err_out_free;
744 	}
745 
746 	cgu->np = np;
747 	cgu->clock_info = clock_info;
748 	cgu->clocks.clk_num = num_clocks;
749 
750 	spin_lock_init(&cgu->lock);
751 
752 	return cgu;
753 
754 err_out_free:
755 	kfree(cgu);
756 err_out:
757 	return NULL;
758 }
759 
/*
 * ingenic_cgu_register_clocks() - register all clocks described by a CGU.
 * @cgu: the CGU whose clocks should be registered
 *
 * Registers every clock in cgu->clock_info and exposes them as a DT clock
 * provider. On failure, all clocks registered so far are released again.
 *
 * Return: 0 on success, otherwise a negative error code.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* Release everything registered before the failure. */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		/* External clocks were obtained with clk_get, not registered. */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}
798