xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision e2c75e76)
1 /*
2  * Ingenic SoC CGU driver
3  *
4  * Copyright (c) 2013-2015 Imagination Technologies
5  * Author: Paul Burton <paul.burton@mips.com>
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License as
9  * published by the Free Software Foundation; either version 2 of
10  * the License, or (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  */
17 
18 #include <linux/bitops.h>
19 #include <linux/clk.h>
20 #include <linux/clk-provider.h>
21 #include <linux/clkdev.h>
22 #include <linux/delay.h>
23 #include <linux/math64.h>
24 #include <linux/of.h>
25 #include <linux/of_address.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include "cgu.h"
29 
30 #define MHZ (1000 * 1000)
31 
32 /**
33  * ingenic_cgu_gate_get() - get the value of a clock gate register bit
34  * @cgu: reference to the CGU whose registers should be read
35  * @info: info struct describing the gate bit
36  *
37  * Retrieves the state of the clock gate bit described by info. The
38  * caller must hold cgu->lock.
39  *
40  * Return: true if the gate bit is set, else false.
41  */
42 static inline bool
43 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
44 		     const struct ingenic_cgu_gate_info *info)
45 {
46 	return readl(cgu->base + info->reg) & BIT(info->bit);
47 }
48 
49 /**
50  * ingenic_cgu_gate_set() - set the value of a clock gate register bit
51  * @cgu: reference to the CGU whose registers should be modified
52  * @info: info struct describing the gate bit
53  * @val: true to gate the clock, false to ungate it
54  *
55  * Sets the given gate bit in order to gate or ungate a clock.
56  *
57  * The caller must hold cgu->lock.
58  */
59 static inline void
60 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
61 		     const struct ingenic_cgu_gate_info *info, bool val)
62 {
63 	u32 clkgr = readl(cgu->base + info->reg);
64 
65 	if (val)
66 		clkgr |= BIT(info->bit);
67 	else
68 		clkgr &= ~BIT(info->bit);
69 
70 	writel(clkgr, cgu->base + info->reg);
71 }
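
/*
 * Usage note (illustrative, not part of the original driver): callers pair
 * these helpers with cgu->lock, e.g. ungating a clock looks like
 *
 *	spin_lock_irqsave(&cgu->lock, flags);
 *	ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
 *	spin_unlock_irqrestore(&cgu->lock, flags);
 *
 * which is the pattern used by ingenic_clk_enable() further down.
 */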
72 
73 /*
74  * PLL operations
75  */
76 
77 static unsigned long
78 ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
79 {
80 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
81 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
82 	const struct ingenic_cgu_clk_info *clk_info;
83 	const struct ingenic_cgu_pll_info *pll_info;
84 	unsigned m, n, od_enc, od;
85 	bool bypass, enable;
86 	unsigned long flags;
87 	u32 ctl;
88 
89 	clk_info = &cgu->clock_info[ingenic_clk->idx];
90 	BUG_ON(clk_info->type != CGU_CLK_PLL);
91 	pll_info = &clk_info->pll;
92 
93 	spin_lock_irqsave(&cgu->lock, flags);
94 	ctl = readl(cgu->base + pll_info->reg);
95 	spin_unlock_irqrestore(&cgu->lock, flags);
96 
97 	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
98 	m += pll_info->m_offset;
99 	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
100 	n += pll_info->n_offset;
101 	od_enc = ctl >> pll_info->od_shift;
102 	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
103 	bypass = !pll_info->no_bypass_bit &&
104 		 !!(ctl & BIT(pll_info->bypass_bit));
105 	enable = !!(ctl & BIT(pll_info->enable_bit));
106 
107 	if (bypass)
108 		return parent_rate;
109 
110 	for (od = 0; od < pll_info->od_max; od++) {
111 		if (pll_info->od_encoding[od] == od_enc)
112 			break;
113 	}
114 	BUG_ON(od == pll_info->od_max);
115 	od++;
116 
117 	return div_u64((u64)parent_rate * m, n * od);
118 }
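
/*
 * Worked example (hypothetical register values, for illustration only):
 * with a 12 MHz parent clock, m = 50, n = 1 and od = 1 the computed rate
 * is 12000000 * 50 / (1 * 1) = 600 MHz; if the bypass bit is set, the
 * 12 MHz parent rate is returned unchanged instead.
 */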
119 
120 static unsigned long
121 ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
122 		 unsigned long rate, unsigned long parent_rate,
123 		 unsigned *pm, unsigned *pn, unsigned *pod)
124 {
125 	const struct ingenic_cgu_pll_info *pll_info;
126 	unsigned m, n, od;
127 
128 	pll_info = &clk_info->pll;
129 	od = 1;
130 
131 	/*
132 	 * The frequency after the input divider must be between 10 and 50 MHz.
133 	 * The highest divider yields the best resolution.
134 	 */
135 	n = parent_rate / (10 * MHZ);
136 	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
137 	n = max_t(unsigned, n, pll_info->n_offset);
138 
139 	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
140 	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
141 	m = max_t(unsigned, m, pll_info->m_offset);
142 
143 	if (pm)
144 		*pm = m;
145 	if (pn)
146 		*pn = n;
147 	if (pod)
148 		*pod = od;
149 
150 	return div_u64((u64)parent_rate * m, n * od);
151 }
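
/*
 * Worked example (hypothetical values, for illustration only): requesting
 * 600 MHz from a 12 MHz parent yields n = 12000000 / 10000000 = 1 and
 * m = (600 * 1 * 1) / 12 = 50, so the achievable rate reported back is
 * 12000000 * 50 / (1 * 1) = 600 MHz. A 48 MHz parent would instead give
 * n = 4, keeping the post-divider frequency inside the 10-50 MHz window
 * mentioned above.
 */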
152 
153 static inline const struct ingenic_cgu_clk_info *to_clk_info(
154 		struct ingenic_clk *ingenic_clk)
155 {
156 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
157 	const struct ingenic_cgu_clk_info *clk_info;
158 
159 	clk_info = &cgu->clock_info[ingenic_clk->idx];
160 	BUG_ON(clk_info->type != CGU_CLK_PLL);
161 
162 	return clk_info;
163 }
164 
165 static long
166 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
167 		       unsigned long *prate)
168 {
169 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
170 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
171 
172 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
173 }
174 
175 static int
176 ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
177 		     unsigned long parent_rate)
178 {
179 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
180 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
181 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
182 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
183 	unsigned long rate, flags;
184 	unsigned int m, n, od;
185 	u32 ctl;
186 
187 	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
188 			       &m, &n, &od);
189 	if (rate != req_rate)
190 		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
191 			clk_info->name, req_rate, rate);
192 
193 	spin_lock_irqsave(&cgu->lock, flags);
194 	ctl = readl(cgu->base + pll_info->reg);
195 
196 	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
197 	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;
198 
199 	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
200 	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;
201 
202 	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
203 	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;
204 
205 	writel(ctl, cgu->base + pll_info->reg);
206 	spin_unlock_irqrestore(&cgu->lock, flags);
207 
208 	return 0;
209 }
210 
211 static int ingenic_pll_enable(struct clk_hw *hw)
212 {
213 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
214 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
215 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
216 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
217 	const unsigned int timeout = 100;
218 	unsigned long flags;
219 	unsigned int i;
220 	u32 ctl;
221 
222 	spin_lock_irqsave(&cgu->lock, flags);
223 	ctl = readl(cgu->base + pll_info->reg);
224 
225 	ctl &= ~BIT(pll_info->bypass_bit);
226 	ctl |= BIT(pll_info->enable_bit);
227 
228 	writel(ctl, cgu->base + pll_info->reg);
229 
230 	/* wait for the PLL to stabilise */
231 	for (i = 0; i < timeout; i++) {
232 		ctl = readl(cgu->base + pll_info->reg);
233 		if (ctl & BIT(pll_info->stable_bit))
234 			break;
235 		mdelay(1);
236 	}
237 
238 	spin_unlock_irqrestore(&cgu->lock, flags);
239 
240 	if (i == timeout)
241 		return -EBUSY;
242 
243 	return 0;
244 }
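
/*
 * Enabling a PLL clears its bypass bit and sets its enable bit in a single
 * register write, then polls the stable bit for up to 100 iterations of
 * mdelay(1) (roughly 100 ms) before giving up with -EBUSY.
 */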
245 
246 static void ingenic_pll_disable(struct clk_hw *hw)
247 {
248 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
249 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
250 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
251 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
252 	unsigned long flags;
253 	u32 ctl;
254 
255 	spin_lock_irqsave(&cgu->lock, flags);
256 	ctl = readl(cgu->base + pll_info->reg);
257 
258 	ctl &= ~BIT(pll_info->enable_bit);
259 
260 	writel(ctl, cgu->base + pll_info->reg);
261 	spin_unlock_irqrestore(&cgu->lock, flags);
262 }
263 
264 static int ingenic_pll_is_enabled(struct clk_hw *hw)
265 {
266 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
267 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
268 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
269 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
270 	unsigned long flags;
271 	u32 ctl;
272 
273 	spin_lock_irqsave(&cgu->lock, flags);
274 	ctl = readl(cgu->base + pll_info->reg);
275 	spin_unlock_irqrestore(&cgu->lock, flags);
276 
277 	return !!(ctl & BIT(pll_info->enable_bit));
278 }
279 
280 static const struct clk_ops ingenic_pll_ops = {
281 	.recalc_rate = ingenic_pll_recalc_rate,
282 	.round_rate = ingenic_pll_round_rate,
283 	.set_rate = ingenic_pll_set_rate,
284 
285 	.enable = ingenic_pll_enable,
286 	.disable = ingenic_pll_disable,
287 	.is_enabled = ingenic_pll_is_enabled,
288 };
289 
290 /*
291  * Operations for all non-PLL clocks
292  */
293 
294 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
295 {
296 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
297 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
298 	const struct ingenic_cgu_clk_info *clk_info;
299 	u32 reg;
300 	u8 i, hw_idx, idx = 0;
301 
302 	clk_info = &cgu->clock_info[ingenic_clk->idx];
303 
304 	if (clk_info->type & CGU_CLK_MUX) {
305 		reg = readl(cgu->base + clk_info->mux.reg);
306 		hw_idx = (reg >> clk_info->mux.shift) &
307 			 GENMASK(clk_info->mux.bits - 1, 0);
308 
309 		/*
310 		 * Convert the hardware index to the parent index by skipping
311 		 * over any -1's in the parents array.
312 		 */
313 		for (i = 0; i < hw_idx; i++) {
314 			if (clk_info->parents[i] != -1)
315 				idx++;
316 		}
317 	}
318 
319 	return idx;
320 }
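
/*
 * Index mapping example (hypothetical parents array, for illustration only):
 * with clk_info->parents = { EXT, -1, PLL, -1 }, a hardware mux field value
 * of 2 skips the single -1 before it and is reported as parent index 1 here,
 * while ingenic_clk_set_parent() below performs the inverse mapping and
 * turns parent index 1 back into the hardware value 2.
 */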
321 
322 static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
323 {
324 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
325 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
326 	const struct ingenic_cgu_clk_info *clk_info;
327 	unsigned long flags;
328 	u8 curr_idx, hw_idx, num_poss;
329 	u32 reg, mask;
330 
331 	clk_info = &cgu->clock_info[ingenic_clk->idx];
332 
333 	if (clk_info->type & CGU_CLK_MUX) {
334 		/*
335 		 * Convert the parent index to the hardware index by adding
336 		 * 1 for any -1 in the parents array preceding the given
337 		 * index. That is, we want the index of idx'th entry in
338 		 * clk_info->parents which does not equal -1.
339 		 */
340 		hw_idx = curr_idx = 0;
341 		num_poss = 1 << clk_info->mux.bits;
342 		for (; hw_idx < num_poss; hw_idx++) {
343 			if (clk_info->parents[hw_idx] == -1)
344 				continue;
345 			if (curr_idx == idx)
346 				break;
347 			curr_idx++;
348 		}
349 
350 		/* idx should always be a valid parent */
351 		BUG_ON(curr_idx != idx);
352 
353 		mask = GENMASK(clk_info->mux.bits - 1, 0);
354 		mask <<= clk_info->mux.shift;
355 
356 		spin_lock_irqsave(&cgu->lock, flags);
357 
358 		/* write the register */
359 		reg = readl(cgu->base + clk_info->mux.reg);
360 		reg &= ~mask;
361 		reg |= hw_idx << clk_info->mux.shift;
362 		writel(reg, cgu->base + clk_info->mux.reg);
363 
364 		spin_unlock_irqrestore(&cgu->lock, flags);
365 		return 0;
366 	}
367 
368 	return idx ? -EINVAL : 0;
369 }
370 
371 static unsigned long
372 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
373 {
374 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
375 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
376 	const struct ingenic_cgu_clk_info *clk_info;
377 	unsigned long rate = parent_rate;
378 	u32 div_reg, div;
379 
380 	clk_info = &cgu->clock_info[ingenic_clk->idx];
381 
382 	if (clk_info->type & CGU_CLK_DIV) {
383 		div_reg = readl(cgu->base + clk_info->div.reg);
384 		div = (div_reg >> clk_info->div.shift) &
385 		      GENMASK(clk_info->div.bits - 1, 0);
386 		div += 1;
387 		div *= clk_info->div.div;
388 
389 		rate /= div;
390 	} else if (clk_info->type & CGU_CLK_FIXDIV) {
391 		rate /= clk_info->fixdiv.div;
392 	}
393 
394 	return rate;
395 }
396 
397 static unsigned
398 ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
399 		     unsigned long parent_rate, unsigned long req_rate)
400 {
401 	unsigned div;
402 
403 	/* calculate the divider value */
404 	div = DIV_ROUND_UP(parent_rate, req_rate);
405 
406 	/* and impose hardware constraints */
407 	div = min_t(unsigned, div, 1 << clk_info->div.bits);
408 	div = max_t(unsigned, div, 1);
409 
410 	/*
411 	 * If the divider value must itself be divided by a constant
412 	 * (clk_info->div.div) before being written to the divider register,
413 	 * round it down to a multiple of that constant so that no precision
414 	 * is lost when the division is applied.
415 	 */
415 	div /= clk_info->div.div;
416 	div *= clk_info->div.div;
417 
418 	return div;
419 }
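
/*
 * Worked example (hypothetical values, for illustration only): a 48 MHz
 * request from a 200 MHz parent with div.bits = 4 gives
 * DIV_ROUND_UP(200, 48) = 5, which already lies in the 1..16 hardware
 * range. If div.div were 2, the value would then be rounded down to 4 and
 * ingenic_clk_round_rate() would report 200 MHz / 4 = 50 MHz.
 */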
420 
421 static long
422 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
423 		       unsigned long *parent_rate)
424 {
425 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
426 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
427 	const struct ingenic_cgu_clk_info *clk_info;
428 	long rate = *parent_rate;
429 
430 	clk_info = &cgu->clock_info[ingenic_clk->idx];
431 
432 	if (clk_info->type & CGU_CLK_DIV)
433 		rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
434 	else if (clk_info->type & CGU_CLK_FIXDIV)
435 		rate /= clk_info->fixdiv.div;
436 
437 	return rate;
438 }
439 
440 static int
441 ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
442 		     unsigned long parent_rate)
443 {
444 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
445 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
446 	const struct ingenic_cgu_clk_info *clk_info;
447 	const unsigned timeout = 100;
448 	unsigned long rate, flags;
449 	unsigned div, i;
450 	u32 reg, mask;
451 	int ret = 0;
452 
453 	clk_info = &cgu->clock_info[ingenic_clk->idx];
454 
455 	if (clk_info->type & CGU_CLK_DIV) {
456 		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
457 		rate = parent_rate / div;
458 
459 		if (rate != req_rate)
460 			return -EINVAL;
461 
462 		spin_lock_irqsave(&cgu->lock, flags);
463 		reg = readl(cgu->base + clk_info->div.reg);
464 
465 		/* update the divider value */
466 		mask = GENMASK(clk_info->div.bits - 1, 0);
467 		reg &= ~(mask << clk_info->div.shift);
468 		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;
469 
470 		/* clear the stop bit */
471 		if (clk_info->div.stop_bit != -1)
472 			reg &= ~BIT(clk_info->div.stop_bit);
473 
474 		/* set the change enable bit */
475 		if (clk_info->div.ce_bit != -1)
476 			reg |= BIT(clk_info->div.ce_bit);
477 
478 		/* update the hardware */
479 		writel(reg, cgu->base + clk_info->div.reg);
480 
481 		/* wait for the change to take effect */
482 		if (clk_info->div.busy_bit != -1) {
483 			for (i = 0; i < timeout; i++) {
484 				reg = readl(cgu->base + clk_info->div.reg);
485 				if (!(reg & BIT(clk_info->div.busy_bit)))
486 					break;
487 				mdelay(1);
488 			}
489 			if (i == timeout)
490 				ret = -EBUSY;
491 		}
492 
493 		spin_unlock_irqrestore(&cgu->lock, flags);
494 		return ret;
495 	}
496 
497 	return -EINVAL;
498 }
499 
500 static int ingenic_clk_enable(struct clk_hw *hw)
501 {
502 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
503 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
504 	const struct ingenic_cgu_clk_info *clk_info;
505 	unsigned long flags;
506 
507 	clk_info = &cgu->clock_info[ingenic_clk->idx];
508 
509 	if (clk_info->type & CGU_CLK_GATE) {
510 		/* ungate the clock */
511 		spin_lock_irqsave(&cgu->lock, flags);
512 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
513 		spin_unlock_irqrestore(&cgu->lock, flags);
514 	}
515 
516 	return 0;
517 }
518 
519 static void ingenic_clk_disable(struct clk_hw *hw)
520 {
521 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
522 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
523 	const struct ingenic_cgu_clk_info *clk_info;
524 	unsigned long flags;
525 
526 	clk_info = &cgu->clock_info[ingenic_clk->idx];
527 
528 	if (clk_info->type & CGU_CLK_GATE) {
529 		/* gate the clock */
530 		spin_lock_irqsave(&cgu->lock, flags);
531 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
532 		spin_unlock_irqrestore(&cgu->lock, flags);
533 	}
534 }
535 
536 static int ingenic_clk_is_enabled(struct clk_hw *hw)
537 {
538 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
539 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
540 	const struct ingenic_cgu_clk_info *clk_info;
541 	unsigned long flags;
542 	int enabled = 1;
543 
544 	clk_info = &cgu->clock_info[ingenic_clk->idx];
545 
546 	if (clk_info->type & CGU_CLK_GATE) {
547 		spin_lock_irqsave(&cgu->lock, flags);
548 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
549 		spin_unlock_irqrestore(&cgu->lock, flags);
550 	}
551 
552 	return enabled;
553 }
554 
555 static const struct clk_ops ingenic_clk_ops = {
556 	.get_parent = ingenic_clk_get_parent,
557 	.set_parent = ingenic_clk_set_parent,
558 
559 	.recalc_rate = ingenic_clk_recalc_rate,
560 	.round_rate = ingenic_clk_round_rate,
561 	.set_rate = ingenic_clk_set_rate,
562 
563 	.enable = ingenic_clk_enable,
564 	.disable = ingenic_clk_disable,
565 	.is_enabled = ingenic_clk_is_enabled,
566 };
567 
568 /*
569  * Setup functions.
570  */
571 
572 static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
573 {
574 	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
575 	struct clk_init_data clk_init;
576 	struct ingenic_clk *ingenic_clk = NULL;
577 	struct clk *clk, *parent;
578 	const char *parent_names[4];
579 	unsigned caps, i, num_possible;
580 	int err = -EINVAL;
581 
582 	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));
583 
584 	if (clk_info->type == CGU_CLK_EXT) {
585 		clk = of_clk_get_by_name(cgu->np, clk_info->name);
586 		if (IS_ERR(clk)) {
587 			pr_err("%s: no external clock '%s' provided\n",
588 			       __func__, clk_info->name);
589 			err = -ENODEV;
590 			goto out;
591 		}
592 		err = clk_register_clkdev(clk, clk_info->name, NULL);
593 		if (err) {
594 			clk_put(clk);
595 			goto out;
596 		}
597 		cgu->clocks.clks[idx] = clk;
598 		return 0;
599 	}
600 
601 	if (!clk_info->type) {
602 		pr_err("%s: no clock type specified for '%s'\n", __func__,
603 		       clk_info->name);
604 		goto out;
605 	}
606 
607 	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
608 	if (!ingenic_clk) {
609 		err = -ENOMEM;
610 		goto out;
611 	}
612 
613 	ingenic_clk->hw.init = &clk_init;
614 	ingenic_clk->cgu = cgu;
615 	ingenic_clk->idx = idx;
616 
617 	clk_init.name = clk_info->name;
618 	clk_init.flags = 0;
619 	clk_init.parent_names = parent_names;
620 
621 	caps = clk_info->type;
622 
623 	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
624 		clk_init.num_parents = 0;
625 
626 		if (caps & CGU_CLK_MUX)
627 			num_possible = 1 << clk_info->mux.bits;
628 		else
629 			num_possible = ARRAY_SIZE(clk_info->parents);
630 
631 		for (i = 0; i < num_possible; i++) {
632 			if (clk_info->parents[i] == -1)
633 				continue;
634 
635 			parent = cgu->clocks.clks[clk_info->parents[i]];
636 			parent_names[clk_init.num_parents] =
637 				__clk_get_name(parent);
638 			clk_init.num_parents++;
639 		}
640 
641 		BUG_ON(!clk_init.num_parents);
642 		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
643 	} else {
644 		BUG_ON(clk_info->parents[0] == -1);
645 		clk_init.num_parents = 1;
646 		parent = cgu->clocks.clks[clk_info->parents[0]];
647 		parent_names[0] = __clk_get_name(parent);
648 	}
649 
650 	if (caps & CGU_CLK_CUSTOM) {
651 		clk_init.ops = clk_info->custom.clk_ops;
652 
653 		caps &= ~CGU_CLK_CUSTOM;
654 
655 		if (caps) {
656 			pr_err("%s: custom clock may not be combined with type 0x%x\n",
657 			       __func__, caps);
658 			goto out;
659 		}
660 	} else if (caps & CGU_CLK_PLL) {
661 		clk_init.ops = &ingenic_pll_ops;
662 		clk_init.flags |= CLK_SET_RATE_GATE;
663 
664 		caps &= ~CGU_CLK_PLL;
665 
666 		if (caps) {
667 			pr_err("%s: PLL may not be combined with type 0x%x\n",
668 			       __func__, caps);
669 			goto out;
670 		}
671 	} else {
672 		clk_init.ops = &ingenic_clk_ops;
673 	}
674 
675 	/* nothing to do for gates or fixed dividers */
676 	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);
677 
678 	if (caps & CGU_CLK_MUX) {
679 		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
680 			clk_init.flags |= CLK_SET_PARENT_GATE;
681 
682 		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
683 	}
684 
685 	if (caps & CGU_CLK_DIV) {
686 		caps &= ~CGU_CLK_DIV;
687 	} else {
688 		/* pass rate changes to the parent clock */
689 		clk_init.flags |= CLK_SET_RATE_PARENT;
690 	}
691 
692 	if (caps) {
693 		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
694 		goto out;
695 	}
696 
697 	clk = clk_register(NULL, &ingenic_clk->hw);
698 	if (IS_ERR(clk)) {
699 		pr_err("%s: failed to register clock '%s'\n", __func__,
700 		       clk_info->name);
701 		err = PTR_ERR(clk);
702 		goto out;
703 	}
704 
705 	err = clk_register_clkdev(clk, clk_info->name, NULL);
706 	if (err)
707 		goto out;
708 
709 	cgu->clocks.clks[idx] = clk;
710 out:
711 	if (err)
712 		kfree(ingenic_clk);
713 	return err;
714 }
715 
716 struct ingenic_cgu *
717 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
718 		unsigned num_clocks, struct device_node *np)
719 {
720 	struct ingenic_cgu *cgu;
721 
722 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
723 	if (!cgu)
724 		goto err_out;
725 
726 	cgu->base = of_iomap(np, 0);
727 	if (!cgu->base) {
728 		pr_err("%s: failed to map CGU registers\n", __func__);
729 		goto err_out_free;
730 	}
731 
732 	cgu->np = np;
733 	cgu->clock_info = clock_info;
734 	cgu->clocks.clk_num = num_clocks;
735 
736 	spin_lock_init(&cgu->lock);
737 
738 	return cgu;
739 
740 err_out_free:
741 	kfree(cgu);
742 err_out:
743 	return NULL;
744 }
745 
746 int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
747 {
748 	unsigned i;
749 	int err;
750 
751 	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
752 				   GFP_KERNEL);
753 	if (!cgu->clocks.clks) {
754 		err = -ENOMEM;
755 		goto err_out;
756 	}
757 
758 	for (i = 0; i < cgu->clocks.clk_num; i++) {
759 		err = ingenic_register_clock(cgu, i);
760 		if (err)
761 			goto err_out_unregister;
762 	}
763 
764 	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
765 				  &cgu->clocks);
766 	if (err)
767 		goto err_out_unregister;
768 
769 	return 0;
770 
771 err_out_unregister:
772 	for (i = 0; i < cgu->clocks.clk_num; i++) {
773 		if (!cgu->clocks.clks[i])
774 			continue;
775 		if (cgu->clock_info[i].type & CGU_CLK_EXT)
776 			clk_put(cgu->clocks.clks[i]);
777 		else
778 			clk_unregister(cgu->clocks.clks[i]);
779 	}
780 	kfree(cgu->clocks.clks);
781 err_out:
782 	return err;
783 }
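
/*
 * Usage sketch (illustrative only; names and the compatible string are made
 * up): an SoC-specific CGU driver builds a clock_info table and calls the
 * two helpers above from an OF init callback, for example:
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("example-cgu: failed to initialise CGU\n");
 *			return;
 *		}
 *
 *		if (ingenic_cgu_register_clocks(cgu))
 *			pr_err("example-cgu: failed to register clocks\n");
 *	}
 *	CLK_OF_DECLARE(example_cgu, "example,example-cgu", example_cgu_init);
 */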
784