xref: /openbmc/linux/drivers/clk/bcm/clk-iproc-pll.c (revision f713c6bf32092a259d6baf2be24f9c3dbf2462c3)
/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include "clk-iproc.h"

#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT  30

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

#define NUM_KP_BANDS 3
enum kp_band {
	KP_BAND_MID = 0,
	KP_BAND_HIGH,
	KP_BAND_HIGH_HIGH
};

static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
	{ 5, 6, 6, 7, 7, 8, 9, 10 },
	{ 4, 4, 5, 5, 6, 7, 8, 9  },
	{ 4, 5, 5, 6, 7, 8, 9, 10 },
};

static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
	{ 10000000,  12500000  },
	{ 12500000,  15000000  },
	{ 15000000,  20000000  },
	{ 20000000,  25000000  },
	{ 25000000,  50000000  },
	{ 50000000,  75000000  },
	{ 75000000,  100000000 },
	{ 100000000, 125000000 },
};
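/*
 * Example of how the two tables above combine (illustrative values only):
 * a 25 MHz reference clock falls into band index 4 ([25 MHz, 50 MHz)), so
 * get_kp(25000000, KP_BAND_MID) below returns kp_table[KP_BAND_MID][4] = 7.
 */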

enum vco_freq_range {
	VCO_LOW       = 700000000U,
	VCO_MID       = 1200000000U,
	VCO_HIGH      = 2200000000U,
	VCO_HIGH_HIGH = 3100000000U,
	VCO_MAX       = 4000000000U,
};

struct iproc_pll;

struct iproc_clk {
	struct clk_hw hw;
	const char *name;
	struct iproc_pll *pll;
	unsigned long rate;
	const struct iproc_clk_ctrl *ctrl;
};

struct iproc_pll {
	void __iomem *pll_base;
	void __iomem *pwr_base;
	void __iomem *asiu_base;

	const struct iproc_pll_ctrl *ctrl;
	const struct iproc_pll_vco_param *vco_param;
	unsigned int num_vco_entries;

	struct clk_onecell_data clk_data;
	struct iproc_clk *clks;
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)

/*
 * Based on the target frequency, find a match from the VCO frequency parameter
 * table and return its index
 */
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
{
	int i;

	for (i = 0; i < pll->num_vco_entries; i++)
		if (target_rate == pll->vco_param[i].rate)
			break;

	if (i >= pll->num_vco_entries)
		return -EINVAL;

	return i;
}

static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
{
	int i;

	if (ref_freq < ref_freq_table[0][0])
		return -EINVAL;

	for (i = 0; i < NUM_FREQ_BANDS; i++) {
		if (ref_freq >= ref_freq_table[i][0] &&
		    ref_freq < ref_freq_table[i][1])
			return kp_table[kp_index][i];
	}
	return -EINVAL;
}

static int pll_wait_for_lock(struct iproc_pll *pll)
{
	int i;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	for (i = 0; i < LOCK_DELAY; i++) {
		u32 val = readl(pll->pll_base + ctrl->status.offset);

		if (val & (1 << ctrl->status.shift))
			return 0;
		udelay(10);
	}

	return -EIO;
}

static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
			    const u32 offset, u32 val)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	writel(val, base + offset);

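	/*
	 * Some PLLs are flagged with IPROC_CLK_NEEDS_READ_BACK; for those,
	 * read the register back, presumably to make sure the posted write
	 * has taken effect before the caller continues.
	 */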
	if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
		     base == pll->pll_base))
		val = readl(base + offset);
}

static void __pll_disable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val &= ~(1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->pll_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		iproc_pll_write(pll, pll->pll_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* latch input value so core power can be shut down */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= 1 << ctrl->aon.iso_shift;
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

		/* power down the core */
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}
}

static int __pll_enable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->pll_base + ctrl->aon.offset);
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->pll_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* power up the PLL and make sure it's not latched */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		val &= ~(1 << ctrl->aon.iso_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}

	/* certain PLLs also need to be ungated from the ASIU top level */
	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val |= (1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	return 0;
}

static void __pll_put_in_reset(struct iproc_pll *pll)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

	val = readl(pll->pll_base + reset->offset);
	val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
	iproc_pll_write(pll, pll->pll_base, reset->offset, val);
}

static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
				  unsigned int ka, unsigned int ki)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
	const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

	val = readl(pll->pll_base + dig_filter->offset);
	val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
		bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
		bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
	val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
	       ka << dig_filter->ka_shift;
	iproc_pll_write(pll, pll->pll_base, dig_filter->offset, val);

	val = readl(pll->pll_base + reset->offset);
	val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
	iproc_pll_write(pll, pll->pll_base, reset->offset, val);
}

static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
			unsigned long parent_rate)
{
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	int ka = 0, ki, kp, ret;
	unsigned long rate = vco->rate;
	u32 val;
	enum kp_band kp_index;
	unsigned long ref_freq;

	/*
	 * reference frequency = parent frequency / PDIV
	 * If PDIV = 0, then it becomes a multiplier (x2)
	 */
	if (vco->pdiv == 0)
		ref_freq = parent_rate * 2;
	else
		ref_freq = parent_rate / vco->pdiv;
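	/*
	 * For example, with a hypothetical 25 MHz parent clock:
	 * pdiv = 1 yields ref_freq = 25 MHz, while pdiv = 0 yields 50 MHz.
	 */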

	/* determine Ki and Kp index based on target VCO frequency */
	if (rate >= VCO_LOW && rate < VCO_HIGH) {
		ki = 4;
		kp_index = KP_BAND_MID;
	} else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
		ki = 3;
		kp_index = KP_BAND_HIGH;
	} else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
		ki = 3;
		kp_index = KP_BAND_HIGH_HIGH;
	} else {
		pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
				clk->name, rate);
		return -EINVAL;
	}

	kp = get_kp(ref_freq, kp_index);
	if (kp < 0) {
		pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
		return kp;
	}

	ret = __pll_enable(pll);
	if (ret) {
		pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
		return ret;
	}

	/* put PLL in reset */
	__pll_put_in_reset(pll);

	iproc_pll_write(pll, pll->pll_base, ctrl->vco_ctrl.u_offset, 0);

	val = readl(pll->pll_base + ctrl->vco_ctrl.l_offset);

	if (rate >= VCO_LOW && rate < VCO_MID)
		val |= (1 << PLL_VCO_LOW_SHIFT);

	if (rate < VCO_HIGH)
		val &= ~(1 << PLL_VCO_HIGH_SHIFT);
	else
		val |= (1 << PLL_VCO_HIGH_SHIFT);

	iproc_pll_write(pll, pll->pll_base, ctrl->vco_ctrl.l_offset, val);

	/* program integer part of NDIV */
	val = readl(pll->pll_base + ctrl->ndiv_int.offset);
	val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
	val |= vco->ndiv_int << ctrl->ndiv_int.shift;
	iproc_pll_write(pll, pll->pll_base, ctrl->ndiv_int.offset, val);

	/* program fractional part of NDIV */
	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
		val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
			 ctrl->ndiv_frac.shift);
		val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
		iproc_pll_write(pll, pll->pll_base, ctrl->ndiv_frac.offset,
				val);
	}

	/* program PDIV */
	val = readl(pll->pll_base + ctrl->pdiv.offset);
	val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
	val |= vco->pdiv << ctrl->pdiv.shift;
	iproc_pll_write(pll, pll->pll_base, ctrl->pdiv.offset, val);

	__pll_bring_out_reset(pll, kp, ka, ki);

	ret = pll_wait_for_lock(pll);
	if (ret < 0) {
		pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
		return ret;
	}

	return 0;
}
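
/*
 * Summary of the sequence implemented by pll_set_rate() above: enable/power
 * up the PLL, put it in reset, clear the upper VCO control register and set
 * the VCO range bits, program NDIV (integer and, where supported, fractional)
 * and PDIV, bring the PLL out of reset with the chosen Ki/Kp/Ka loop filter
 * values, then poll for lock.
 */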

static int iproc_pll_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	return __pll_enable(pll);
}

static void iproc_pll_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	__pll_disable(pll);
}

static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;
	u64 ndiv;
	unsigned int ndiv_int, ndiv_frac, pdiv;

	if (parent_rate == 0)
		return 0;

	/* PLL needs to be locked */
	val = readl(pll->pll_base + ctrl->status.offset);
	if ((val & (1 << ctrl->status.shift)) == 0) {
		clk->rate = 0;
		return 0;
	}

	/*
	 * PLL output frequency =
	 *
	 * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
	 */
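	/*
	 * For example (purely illustrative register values): ndiv_int = 80,
	 * ndiv_frac = 0, pdiv = 2 with a 50 MHz parent gives
	 * 80 * (50 MHz / 2) = 2000 MHz.
	 */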
	val = readl(pll->pll_base + ctrl->ndiv_int.offset);
	ndiv_int = (val >> ctrl->ndiv_int.shift) &
		bit_mask(ctrl->ndiv_int.width);
	ndiv = (u64)ndiv_int << 20;

	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->pll_base + ctrl->ndiv_frac.offset);
		ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
			bit_mask(ctrl->ndiv_frac.width);

		if (ndiv_frac != 0)
			ndiv = ((u64)ndiv_int << 20) | ndiv_frac;
	}

	val = readl(pll->pll_base + ctrl->pdiv.offset);
	pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

	clk->rate = (ndiv * parent_rate) >> 20;

	if (pdiv == 0)
		clk->rate *= 2;
	else
		clk->rate /= pdiv;

	return clk->rate;
}

static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	unsigned int i;
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
		return -EINVAL;

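	/*
	 * The VCO parameter table is expected to be sorted in ascending
	 * order: pick the smallest supported rate that is >= the requested
	 * rate; requests above the largest entry fall back to that entry.
	 */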
	for (i = 0; i < pll->num_vco_entries; i++) {
		if (rate <= pll->vco_param[i].rate)
			break;
	}

	if (i == pll->num_vco_entries)
		i--;

	return pll->vco_param[i].rate;
}

static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	int rate_index, ret;

	rate_index = pll_get_rate_index(pll, rate);
	if (rate_index < 0)
		return rate_index;

	ret = pll_set_rate(clk, rate_index, parent_rate);
	return ret;
}

static const struct clk_ops iproc_pll_ops = {
	.enable = iproc_pll_enable,
	.disable = iproc_pll_disable,
	.recalc_rate = iproc_pll_recalc_rate,
	.round_rate = iproc_pll_round_rate,
	.set_rate = iproc_pll_set_rate,
};

static int iproc_clk_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	/* channel enable is active low */
	val = readl(pll->pll_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.enable_shift);
	iproc_pll_write(pll, pll->pll_base, ctrl->enable.offset, val);

	/* also make sure channel is not held */
	val = readl(pll->pll_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.hold_shift);
	iproc_pll_write(pll, pll->pll_base, ctrl->enable.offset, val);

	return 0;
}

static void iproc_clk_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	val = readl(pll->pll_base + ctrl->enable.offset);
	val |= 1 << ctrl->enable.enable_shift;
	iproc_pll_write(pll, pll->pll_base, ctrl->enable.offset, val);
}

static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int mdiv;

	if (parent_rate == 0)
		return 0;

	val = readl(pll->pll_base + ctrl->mdiv.offset);
	mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
	if (mdiv == 0)
		mdiv = 256;

	clk->rate = parent_rate / mdiv;

	return clk->rate;
}

static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *parent_rate)
{
	unsigned int div;

	if (rate == 0 || *parent_rate == 0)
		return -EINVAL;

	if (rate == *parent_rate)
		return *parent_rate;

	div = DIV_ROUND_UP(*parent_rate, rate);
	if (div < 2)
		return *parent_rate;

	if (div > 256)
		div = 256;

	return *parent_rate / div;
}
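
/*
 * For example, with a hypothetical 2000 MHz PLL output and a request for
 * 600 MHz, iproc_clk_round_rate() computes div = DIV_ROUND_UP(2000, 600) = 4
 * and returns 2000 MHz / 4 = 500 MHz.
 */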

static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int div;

	if (rate == 0 || parent_rate == 0)
		return -EINVAL;

	div = DIV_ROUND_UP(parent_rate, rate);
	if (div > 256)
		return -EINVAL;

	val = readl(pll->pll_base + ctrl->mdiv.offset);
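	/*
	 * An mdiv field value of 0 encodes divide-by-256, matching the
	 * mdiv == 0 handling in iproc_clk_recalc_rate() above.
	 */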
	if (div == 256) {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
	} else {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
		val |= div << ctrl->mdiv.shift;
	}
	iproc_pll_write(pll, pll->pll_base, ctrl->mdiv.offset, val);
	clk->rate = parent_rate / div;

	return 0;
}

static const struct clk_ops iproc_clk_ops = {
	.enable = iproc_clk_enable,
	.disable = iproc_clk_disable,
	.recalc_rate = iproc_clk_recalc_rate,
	.round_rate = iproc_clk_round_rate,
	.set_rate = iproc_clk_set_rate,
};

/*
 * Some PLLs require the PLL SW override bit to be set before changes can be
 * applied to the PLL
 */
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
		u32 val;

		val = readl(pll->pll_base + ctrl->sw_ctrl.offset);
		val |= BIT(ctrl->sw_ctrl.shift);
		iproc_pll_write(pll, pll->pll_base, ctrl->sw_ctrl.offset, val);
	}
}

void __init iproc_pll_clk_setup(struct device_node *node,
				const struct iproc_pll_ctrl *pll_ctrl,
				const struct iproc_pll_vco_param *vco,
				unsigned int num_vco_entries,
				const struct iproc_clk_ctrl *clk_ctrl,
				unsigned int num_clks)
{
	int i, ret;
	struct clk *clk;
	struct iproc_pll *pll;
	struct iproc_clk *iclk;
	struct clk_init_data init;
	const char *parent_name;

	if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
		return;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	pll->clk_data.clk_num = num_clks;
	pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks),
				     GFP_KERNEL);
	if (WARN_ON(!pll->clk_data.clks))
		goto err_clk_data;

	pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
	if (WARN_ON(!pll->clks))
		goto err_clks;

	pll->pll_base = of_iomap(node, 0);
	if (WARN_ON(!pll->pll_base))
		goto err_pll_iomap;

	/* Some SoCs do not require the pwr_base, thus failing is not fatal */
	pll->pwr_base = of_iomap(node, 1);

	/* some PLLs require gating control at the top ASIU level */
	if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
		pll->asiu_base = of_iomap(node, 2);
		if (WARN_ON(!pll->asiu_base))
			goto err_asiu_iomap;
	}

	/* initialize and register the PLL itself */
	pll->ctrl = pll_ctrl;

	iclk = &pll->clks[0];
	iclk->pll = pll;
	iclk->name = node->name;

	init.name = node->name;
	init.ops = &iproc_pll_ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);
	iclk->hw.init = &init;

	if (vco) {
		pll->num_vco_entries = num_vco_entries;
		pll->vco_param = vco;
	}

	iproc_pll_sw_cfg(pll);

	clk = clk_register(NULL, &iclk->hw);
	if (WARN_ON(IS_ERR(clk)))
		goto err_pll_register;

	pll->clk_data.clks[0] = clk;

	/* now initialize and register all leaf clocks */
	for (i = 1; i < num_clks; i++) {
		const char *clk_name;

		memset(&init, 0, sizeof(init));
		parent_name = node->name;

		ret = of_property_read_string_index(node, "clock-output-names",
						    i, &clk_name);
		if (WARN_ON(ret))
			goto err_clk_register;

		iclk = &pll->clks[i];
		iclk->name = clk_name;
		iclk->pll = pll;
		iclk->ctrl = &clk_ctrl[i];

		init.name = clk_name;
		init.ops = &iproc_clk_ops;
		init.flags = 0;
		init.parent_names = (parent_name ? &parent_name : NULL);
		init.num_parents = (parent_name ? 1 : 0);
		iclk->hw.init = &init;

		clk = clk_register(NULL, &iclk->hw);
		if (WARN_ON(IS_ERR(clk)))
			goto err_clk_register;

		pll->clk_data.clks[i] = clk;
	}

	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data);
	if (WARN_ON(ret))
		goto err_clk_register;

	return;

err_clk_register:
	for (i = 0; i < num_clks; i++)
		clk_unregister(pll->clk_data.clks[i]);

err_pll_register:
	if (pll->asiu_base)
		iounmap(pll->asiu_base);

err_asiu_iomap:
	if (pll->pwr_base)
		iounmap(pll->pwr_base);

	iounmap(pll->pll_base);

err_pll_iomap:
	kfree(pll->clks);

err_clks:
	kfree(pll->clk_data.clks);

err_clk_data:
	kfree(pll);
}
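
/*
 * Usage sketch (illustrative, not part of this file): a SoC-specific driver
 * such as clk-cygnus.c or clk-nsp.c supplies the register layout tables and
 * hooks this setup function up via CLK_OF_DECLARE. The names below are
 * hypothetical placeholders:
 *
 *	static void __init foo_genpll_clk_init(struct device_node *node)
 *	{
 *		iproc_pll_clk_setup(node, &foo_genpll_ctrl, foo_genpll_vco,
 *				    ARRAY_SIZE(foo_genpll_vco), foo_genpll_clk,
 *				    ARRAY_SIZE(foo_genpll_clk));
 *	}
 *	CLK_OF_DECLARE(foo_genpll, "brcm,foo-genpll", foo_genpll_clk_init);
 */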