/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "cgu.h"

#define MHZ (1000 * 1000)

/**
 * ingenic_cgu_gate_get() - get the value of a clock gate register bit
 * @cgu: reference to the CGU whose registers should be read
 * @info: info struct describing the gate bit
 *
 * Retrieves the state of the clock gate bit described by @info. The
 * caller must hold cgu->lock.
 *
 * Return: true if the gate bit is set, else false.
 */
static inline bool
ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info)
{
	return readl(cgu->base + info->reg) & BIT(info->bit);
}

/**
 * ingenic_cgu_gate_set() - set the value of a clock gate register bit
 * @cgu: reference to the CGU whose registers should be modified
 * @info: info struct describing the gate bit
 * @val: true to gate the clock, false to ungate it
 *
 * Sets or clears the given gate bit in order to gate or ungate a clock.
 *
 * The caller must hold cgu->lock.
 */
static inline void
ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
		     const struct ingenic_cgu_gate_info *info, bool val)
{
	u32 clkgr = readl(cgu->base + info->reg);

	if (val)
		clkgr |= BIT(info->bit);
	else
		clkgr &= ~BIT(info->bit);

	writel(clkgr, cgu->base + info->reg);
}

/*
 * PLL operations
 */

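/*
 * Report the current PLL output rate by reading the PLL control register
 * and decoding the M (multiplier), N (divider) and OD (output divider)
 * fields. Returns the parent rate unchanged when the PLL is bypassed,
 * and 0 when it is disabled.
 */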
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass, enable;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	bypass = !!(ctl & BIT(pll_info->bypass_bit));
	enable = !!(ctl & BIT(pll_info->enable_bit));

	if (bypass)
		return parent_rate;

	if (!enable)
		return 0;

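	/*
	 * The OD field is encoded: look up the encoding in od_encoding[]
	 * to recover the actual output divider value (index + 1).
	 */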
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}

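/*
 * Calculate the M, N & OD values that approximate the requested rate from
 * the given parent rate, clamping each to the limits imposed by the PLL's
 * register fields. The chosen values are optionally returned via @pm, @pn
 * & @pod, and the rate they actually produce is returned.
 */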
static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od;

	pll_info = &clk_info->pll;
	od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	n = min_t(unsigned, n, 1 << pll_info->n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << pll_info->m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}

static long
ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *prate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);

	return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
}

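/*
 * Program the PLL's M, N & OD fields for the requested rate, enable the
 * PLL with bypass cleared, then poll the stable bit for up to 100ms.
 * Returns -EBUSY if the PLL fails to report itself stable in time.
 */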
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	const unsigned timeout = 100;
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned long rate, flags;
	unsigned m, n, od, i;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	if (i == timeout)
		return -EBUSY;

	return 0;
}

static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,
};

/*
 * Operations for all non-PLL clocks
 */

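/*
 * Report the index of the clock's current parent. For muxes, the hardware
 * index read from the mux register is translated into an index into the
 * registered parent list by skipping any -1 ("no parent") entries in
 * clk_info->parents. Non-mux clocks always report parent 0.
 */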
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);
		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}

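/*
 * Select a new parent by converting the parent index back to the
 * corresponding hardware mux index and writing it to the mux register.
 * Clocks without a mux accept only parent index 0.
 */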
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of the idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}

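/*
 * Report the clock's rate: for clocks with a divider, divide the parent
 * rate by the divider currently programmed in the divide register
 * (register field value + 1).
 */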
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long rate = parent_rate;
	u32 div_reg, div;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div_reg = readl(cgu->base + clk_info->div.reg);
		div = (div_reg >> clk_info->div.shift) &
		      GENMASK(clk_info->div.bits - 1, 0);
		div += 1;

		rate /= div;
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		/* mirror ingenic_clk_round_rate(): apply the fixed divider */
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}

static unsigned
ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned div;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	/* and impose hardware constraints */
	div = min_t(unsigned, div, 1 << clk_info->div.bits);
	div = max_t(unsigned, div, 1);

	return div;
}

static long
ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
		       unsigned long *parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	long rate = *parent_rate;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV)
		rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
	else if (clk_info->type & CGU_CLK_FIXDIV)
		rate /= clk_info->fixdiv.div;

	return rate;
}

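/*
 * Program the divide register for the requested rate. The rate must be
 * exactly achievable with an integer divider, otherwise -EINVAL is
 * returned. After writing the new divider (with the stop bit cleared and
 * the change-enable bit set where present), the busy bit is polled for up
 * to 100ms and -EBUSY is returned if the change does not take effect.
 */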
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = parent_rate / div;

		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= (div - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}

static int ingenic_clk_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* ungate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return 0;
}

static void ingenic_clk_disable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		/* gate the clock */
		spin_lock_irqsave(&cgu->lock, flags);
		ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}
}

static int ingenic_clk_is_enabled(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	int enabled = 1;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_GATE) {
		spin_lock_irqsave(&cgu->lock, flags);
		enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
		spin_unlock_irqrestore(&cgu->lock, flags);
	}

	return enabled;
}

static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};

/*
 * Setup functions.
 */

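/*
 * A minimal usage sketch for the two public setup functions below: an
 * SoC-specific driver builds an array of ingenic_cgu_clk_info describing
 * its clocks, then hands it to ingenic_cgu_new() followed by
 * ingenic_cgu_register_clocks(). The names example_cgu_clocks and
 * example_cgu_init are illustrative only:
 *
 *	static void __init example_cgu_init(struct device_node *np)
 *	{
 *		struct ingenic_cgu *cgu;
 *
 *		cgu = ingenic_cgu_new(example_cgu_clocks,
 *				      ARRAY_SIZE(example_cgu_clocks), np);
 *		if (!cgu) {
 *			pr_err("example-cgu: failed to initialise CGU\n");
 *			return;
 *		}
 *
 *		if (ingenic_cgu_register_clocks(cgu))
 *			pr_err("example-cgu: failed to register clocks\n");
 *	}
 */

/*
 * Register the clock described by cgu->clock_info[idx]. External clocks
 * are looked up from the device tree; all others are registered with the
 * common clock framework using the PLL, custom or generic clk_ops as
 * dictated by the clock's type flags.
 */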
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = 0;
	clk_init.parent_names = parent_names;

	caps = clk_info->type;

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}

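/*
 * Allocate and initialise a CGU instance: map its registers from the given
 * device tree node, attach the clock_info table and initialise the register
 * lock. Returns NULL on failure.
 */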
struct ingenic_cgu *
ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
		unsigned num_clocks, struct device_node *np)
{
	struct ingenic_cgu *cgu;

	cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
	if (!cgu)
		goto err_out;

	cgu->base = of_iomap(np, 0);
	if (!cgu->base) {
		pr_err("%s: failed to map CGU registers\n", __func__);
		goto err_out_free;
	}

	cgu->np = np;
	cgu->clock_info = clock_info;
	cgu->clocks.clk_num = num_clocks;

	spin_lock_init(&cgu->lock);

	return cgu;

err_out_free:
	kfree(cgu);
err_out:
	return NULL;
}

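/*
 * Register every clock described by the CGU's clock_info table and expose
 * them through a device tree clock provider. On error, any clocks already
 * registered are unregistered (or put, for external clocks) before the
 * error code is returned.
 */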
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}