xref: /openbmc/linux/drivers/clk/ingenic/cgu.c (revision 315a8423)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Ingenic SoC CGU driver
4  *
5  * Copyright (c) 2013-2015 Imagination Technologies
6  * Author: Paul Burton <paul.burton@mips.com>
7  */
8 
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
14 #include <linux/io.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
22 
23 #include "cgu.h"
24 
25 #define MHZ (1000 * 1000)
26 
27 static inline const struct ingenic_cgu_clk_info *
28 to_clk_info(struct ingenic_clk *clk)
29 {
30 	return &clk->cgu->clock_info[clk->idx];
31 }
32 
33 /**
34  * ingenic_cgu_gate_get() - get the value of clock gate register bit
35  * @cgu: reference to the CGU whose registers should be read
36  * @info: info struct describing the gate bit
37  *
38  * Retrieves the state of the clock gate bit described by info. The
39  * caller must hold cgu->lock.
40  *
41  * Return: true if the gate bit is set, else false.
42  */
43 static inline bool
44 ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
45 		     const struct ingenic_cgu_gate_info *info)
46 {
47 	return !!(readl(cgu->base + info->reg) & BIT(info->bit))
48 		^ info->clear_to_gate;
49 }
50 
51 /**
52  * ingenic_cgu_gate_set() - set the value of clock gate register bit
53  * @cgu: reference to the CGU whose registers should be modified
54  * @info: info struct describing the gate bit
55  * @val: non-zero to gate a clock, otherwise zero
56  *
57  * Sets the given gate bit in order to gate or ungate a clock.
58  *
59  * The caller must hold cgu->lock.
60  */
61 static inline void
62 ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
63 		     const struct ingenic_cgu_gate_info *info, bool val)
64 {
65 	u32 clkgr = readl(cgu->base + info->reg);
66 
67 	if (val ^ info->clear_to_gate)
68 		clkgr |= BIT(info->bit);
69 	else
70 		clkgr &= ~BIT(info->bit);
71 
72 	writel(clkgr, cgu->base + info->reg);
73 }
74 
75 /*
76  * PLL operations
77  */
78 
/*
 * Compute the PLL output rate from the current register state:
 * rate = parent * m * rate_multiplier / (n * od), unless bypassed.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	/* This op is only ever installed on pure PLL clocks. */
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* The register fields store m/n minus their respective offsets. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	/* When bypassed, the parent clock passes through unchanged. */
	if (!pll_info->no_bypass_bit) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	/* Map the encoded output divider back to its index; od = index + 1. */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	/* An unmatched OD encoding means the register holds an unexpected value. */
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
121 
/*
 * Compute PLL parameters (m, n, od) approximating @rate from @parent_rate,
 * optionally returning them through @pm/@pn/@pod.
 *
 * Return: the rate actually achieved with the chosen parameters.
 */
static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od;

	pll_info = &clk_info->pll;
	od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	/*
	 * NOTE(review): the register stores n - n_offset in n_bits bits, so
	 * the upper clamp of 1 << n_bits looks one too high when n_offset is
	 * 0 — confirm against the PLL register layout.
	 */
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	/* Integer-MHz approximation of m = rate * n * od / parent_rate. */
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
155 
156 static long
157 ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
158 		       unsigned long *prate)
159 {
160 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
161 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
162 
163 	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
164 }
165 
166 static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
167 					   const struct ingenic_cgu_pll_info *pll_info)
168 {
169 	u32 ctl;
170 
171 	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
172 				  ctl & BIT(pll_info->stable_bit),
173 				  0, 100 * USEC_PER_MSEC);
174 }
175 
/*
 * Program the PLL's m/n/od fields for the rate closest to @req_rate.
 * If the PLL is currently enabled, wait for it to re-lock afterwards.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	/* Let the user know when the exact rate could not be achieved. */
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* The register stores each field minus its offset. */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* od is 1-based; the encoding table is indexed by od - 1. */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);

	/* If the PLL is enabled, verify that it's stable */
	if (ctl & BIT(pll_info->enable_bit))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
217 
/*
 * Enable the PLL: take it out of bypass (where supported), set the enable
 * bit and wait for the PLL to report stability.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	/* Clear the bypass bit first so the PLL output is actually used. */
	if (!pll_info->no_bypass_bit) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* Wait (up to 100ms) for the PLL to lock before returning. */
	ret = ingenic_pll_check_stable(cgu, pll_info);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
248 
249 static void ingenic_pll_disable(struct clk_hw *hw)
250 {
251 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
252 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
253 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
254 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
255 	unsigned long flags;
256 	u32 ctl;
257 
258 	spin_lock_irqsave(&cgu->lock, flags);
259 	ctl = readl(cgu->base + pll_info->reg);
260 
261 	ctl &= ~BIT(pll_info->enable_bit);
262 
263 	writel(ctl, cgu->base + pll_info->reg);
264 	spin_unlock_irqrestore(&cgu->lock, flags);
265 }
266 
267 static int ingenic_pll_is_enabled(struct clk_hw *hw)
268 {
269 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
270 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
271 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
272 	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
273 	u32 ctl;
274 
275 	ctl = readl(cgu->base + pll_info->reg);
276 
277 	return !!(ctl & BIT(pll_info->enable_bit));
278 }
279 
/* clk_ops implementation used for CGU_CLK_PLL clocks. */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
289 
290 /*
291  * Operations for all non-PLL clocks
292  */
293 
294 static u8 ingenic_clk_get_parent(struct clk_hw *hw)
295 {
296 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
297 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
298 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
299 	u32 reg;
300 	u8 i, hw_idx, idx = 0;
301 
302 	if (clk_info->type & CGU_CLK_MUX) {
303 		reg = readl(cgu->base + clk_info->mux.reg);
304 		hw_idx = (reg >> clk_info->mux.shift) &
305 			 GENMASK(clk_info->mux.bits - 1, 0);
306 
307 		/*
308 		 * Convert the hardware index to the parent index by skipping
309 		 * over any -1's in the parents array.
310 		 */
311 		for (i = 0; i < hw_idx; i++) {
312 			if (clk_info->parents[i] != -1)
313 				idx++;
314 		}
315 	}
316 
317 	return idx;
318 }
319 
/*
 * Select parent @idx on a muxed clock by translating the parent index into
 * the corresponding hardware mux value and writing it to the mux register.
 * Non-mux clocks only accept idx 0.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* No mux: only the single fixed parent (index 0) is valid. */
	return idx ? -EINVAL : 0;
}
366 
367 static unsigned long
368 ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
369 {
370 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
371 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
372 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
373 	unsigned long rate = parent_rate;
374 	u32 div_reg, div;
375 	u8 parent;
376 
377 	if (clk_info->type & CGU_CLK_DIV) {
378 		parent = ingenic_clk_get_parent(hw);
379 
380 		if (!(clk_info->div.bypass_mask & BIT(parent))) {
381 			div_reg = readl(cgu->base + clk_info->div.reg);
382 			div = (div_reg >> clk_info->div.shift) &
383 			      GENMASK(clk_info->div.bits - 1, 0);
384 
385 			if (clk_info->div.div_table)
386 				div = clk_info->div.div_table[div];
387 			else
388 				div = (div + 1) * clk_info->div.div;
389 
390 			rate /= div;
391 		}
392 	} else if (clk_info->type & CGU_CLK_FIXDIV) {
393 		rate /= clk_info->fixdiv.div;
394 	}
395 
396 	return rate;
397 }
398 
399 static unsigned int
400 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
401 			unsigned int div)
402 {
403 	unsigned int i, best_i = 0, best = (unsigned int)-1;
404 
405 	for (i = 0; i < (1 << clk_info->div.bits)
406 				&& clk_info->div.div_table[i]; i++) {
407 		if (clk_info->div.div_table[i] >= div &&
408 		    clk_info->div.div_table[i] < best) {
409 			best = clk_info->div.div_table[i];
410 			best_i = i;
411 
412 			if (div == best)
413 				break;
414 		}
415 	}
416 
417 	return best_i;
418 }
419 
420 static unsigned
421 ingenic_clk_calc_div(struct clk_hw *hw,
422 		     const struct ingenic_cgu_clk_info *clk_info,
423 		     unsigned long parent_rate, unsigned long req_rate)
424 {
425 	unsigned int div, hw_div;
426 	u8 parent;
427 
428 	parent = ingenic_clk_get_parent(hw);
429 	if (clk_info->div.bypass_mask & BIT(parent))
430 		return 1;
431 
432 	/* calculate the divide */
433 	div = DIV_ROUND_UP(parent_rate, req_rate);
434 
435 	if (clk_info->div.div_table) {
436 		hw_div = ingenic_clk_calc_hw_div(clk_info, div);
437 
438 		return clk_info->div.div_table[hw_div];
439 	}
440 
441 	/* Impose hardware constraints */
442 	div = min_t(unsigned, div, 1 << clk_info->div.bits);
443 	div = max_t(unsigned, div, 1);
444 
445 	/*
446 	 * If the divider value itself must be divided before being written to
447 	 * the divider register, we must ensure we don't have any bits set that
448 	 * would be lost as a result of doing so.
449 	 */
450 	div /= clk_info->div.div;
451 	div *= clk_info->div.div;
452 
453 	return div;
454 }
455 
456 static long
457 ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
458 		       unsigned long *parent_rate)
459 {
460 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
461 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
462 	unsigned int div = 1;
463 
464 	if (clk_info->type & CGU_CLK_DIV)
465 		div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
466 	else if (clk_info->type & CGU_CLK_FIXDIV)
467 		div = clk_info->fixdiv.div;
468 	else if (clk_hw_can_set_rate_parent(hw))
469 		*parent_rate = req_rate;
470 
471 	return DIV_ROUND_UP(*parent_rate, div);
472 }
473 
474 static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
475 					   const struct ingenic_cgu_clk_info *clk_info)
476 {
477 	u32 reg;
478 
479 	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
480 				  !(reg & BIT(clk_info->div.busy_bit)),
481 				  0, 100 * USEC_PER_MSEC);
482 }
483 
/*
 * Program the clock's divider so that it produces exactly @req_rate from
 * @parent_rate. Fails with -EINVAL if the exact rate cannot be achieved
 * or the clock has no programmable divider.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Only exact matches are accepted for set_rate. */
		if (rate != req_rate)
			return -EINVAL;

		/* Encode the divider for the register field. */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
537 
538 static int ingenic_clk_enable(struct clk_hw *hw)
539 {
540 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
541 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
542 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
543 	unsigned long flags;
544 
545 	if (clk_info->type & CGU_CLK_GATE) {
546 		/* ungate the clock */
547 		spin_lock_irqsave(&cgu->lock, flags);
548 		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
549 		spin_unlock_irqrestore(&cgu->lock, flags);
550 
551 		if (clk_info->gate.delay_us)
552 			udelay(clk_info->gate.delay_us);
553 	}
554 
555 	return 0;
556 }
557 
558 static void ingenic_clk_disable(struct clk_hw *hw)
559 {
560 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
561 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
562 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
563 	unsigned long flags;
564 
565 	if (clk_info->type & CGU_CLK_GATE) {
566 		/* gate the clock */
567 		spin_lock_irqsave(&cgu->lock, flags);
568 		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
569 		spin_unlock_irqrestore(&cgu->lock, flags);
570 	}
571 }
572 
573 static int ingenic_clk_is_enabled(struct clk_hw *hw)
574 {
575 	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
576 	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
577 	struct ingenic_cgu *cgu = ingenic_clk->cgu;
578 	int enabled = 1;
579 
580 	if (clk_info->type & CGU_CLK_GATE)
581 		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
582 
583 	return enabled;
584 }
585 
/* clk_ops implementation shared by all non-PLL, non-custom clocks. */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
598 
599 /*
600  * Setup functions.
601  */
602 
/*
 * Register the clock described by cgu->clock_info[idx] with the common
 * clock framework: external clocks are looked up from DT, everything else
 * gets an ops table selected from its type flags and is clk_register()'d.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	/* External clocks are provided by DT; just look them up. */
	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	/* caps tracks the type bits not yet handled; must end up zero. */
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	/* Collect parent names, skipping -1 placeholder entries. */
	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	/* CUSTOM and PLL ops are exclusive of all other type bits. */
	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	/*
	 * NOTE(review): if clk_register_clkdev() fails here, the clk stays
	 * registered while ingenic_clk is freed below and clks[idx] is never
	 * set, so the caller's unwind won't unregister it — confirm whether
	 * a clk_unregister() is needed on this path.
	 */
	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
745 
746 struct ingenic_cgu *
747 ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
748 		unsigned num_clocks, struct device_node *np)
749 {
750 	struct ingenic_cgu *cgu;
751 
752 	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
753 	if (!cgu)
754 		goto err_out;
755 
756 	cgu->base = of_iomap(np, 0);
757 	if (!cgu->base) {
758 		pr_err("%s: failed to map CGU registers\n", __func__);
759 		goto err_out_free;
760 	}
761 
762 	cgu->np = np;
763 	cgu->clock_info = clock_info;
764 	cgu->clocks.clk_num = num_clocks;
765 
766 	spin_lock_init(&cgu->lock);
767 
768 	return cgu;
769 
770 err_out_free:
771 	kfree(cgu);
772 err_out:
773 	return NULL;
774 }
775 
776 int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
777 {
778 	unsigned i;
779 	int err;
780 
781 	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
782 				   GFP_KERNEL);
783 	if (!cgu->clocks.clks) {
784 		err = -ENOMEM;
785 		goto err_out;
786 	}
787 
788 	for (i = 0; i < cgu->clocks.clk_num; i++) {
789 		err = ingenic_register_clock(cgu, i);
790 		if (err)
791 			goto err_out_unregister;
792 	}
793 
794 	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
795 				  &cgu->clocks);
796 	if (err)
797 		goto err_out_unregister;
798 
799 	return 0;
800 
801 err_out_unregister:
802 	for (i = 0; i < cgu->clocks.clk_num; i++) {
803 		if (!cgu->clocks.clks[i])
804 			continue;
805 		if (cgu->clock_info[i].type & CGU_CLK_EXT)
806 			clk_put(cgu->clocks.clks[i]);
807 		else
808 			clk_unregister(cgu->clocks.clks[i]);
809 	}
810 	kfree(cgu->clocks.clks);
811 err_out:
812 	return err;
813 }
814