/*
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/of_address.h>
#include <linux/delay.h>

#include "clk-iproc.h"

#define PLL_VCO_HIGH_SHIFT 19
#define PLL_VCO_LOW_SHIFT  30

/* number of delay loops waiting for PLL to lock */
#define LOCK_DELAY 100

/* number of VCO frequency bands */
#define NUM_FREQ_BANDS 8

#define NUM_KP_BANDS 3
enum kp_band {
	KP_BAND_MID = 0,
	KP_BAND_HIGH,
	KP_BAND_HIGH_HIGH
};

static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = {
	{ 5, 6, 6, 7, 7, 8, 9, 10 },
	{ 4, 4, 5, 5, 6, 7, 8, 9 },
	{ 4, 5, 5, 6, 7, 8, 9, 10 },
};

static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = {
	{ 10000000,  12500000 },
	{ 12500000,  15000000 },
	{ 15000000,  20000000 },
	{ 20000000,  25000000 },
	{ 25000000,  50000000 },
	{ 50000000,  75000000 },
	{ 75000000,  100000000 },
	{ 100000000, 125000000 },
};

enum vco_freq_range {
	VCO_LOW       = 700000000U,
	VCO_MID       = 1200000000U,
	VCO_HIGH      = 2200000000U,
	VCO_HIGH_HIGH = 3100000000U,
	VCO_MAX       = 4000000000U,
};

struct iproc_pll;

struct iproc_clk {
	struct clk_hw hw;
	const char *name;
	struct iproc_pll *pll;
	unsigned long rate;
	const struct iproc_clk_ctrl *ctrl;
};

struct iproc_pll {
	void __iomem *status_base;
	void __iomem *control_base;
	void __iomem *pwr_base;
	void __iomem *asiu_base;

	const struct iproc_pll_ctrl *ctrl;
	const struct iproc_pll_vco_param *vco_param;
	unsigned int num_vco_entries;

	struct clk_onecell_data clk_data;
	struct iproc_clk *clks;
};

#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)

/*
 * Based on the target frequency, find a match from the VCO frequency parameter
 * table and return its index
 */
static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate)
{
	int i;

	for (i = 0; i < pll->num_vco_entries; i++)
		if (target_rate == pll->vco_param[i].rate)
			break;

	if (i >= pll->num_vco_entries)
		return -EINVAL;

	return i;
}

static int get_kp(unsigned long ref_freq, enum kp_band kp_index)
{
	int i;

	if (ref_freq < ref_freq_table[0][0])
		return -EINVAL;

	for (i = 0; i < NUM_FREQ_BANDS; i++) {
		if (ref_freq >= ref_freq_table[i][0] &&
		    ref_freq < ref_freq_table[i][1])
			return kp_table[kp_index][i];
	}
	return -EINVAL;
}

static int pll_wait_for_lock(struct iproc_pll *pll)
{
	int i;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	for (i = 0; i < LOCK_DELAY; i++) {
		u32 val = readl(pll->status_base + ctrl->status.offset);

		if (val & (1 << ctrl->status.shift))
			return 0;
		udelay(10);
	}

	return -EIO;
}

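/*
 * Helper used for all register writes in this driver. PLLs flagged with
 * IPROC_CLK_NEEDS_READ_BACK appear to require a read-back after each write
 * to the status/control register space, presumably so the write has taken
 * effect before the next access.
 */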
static void iproc_pll_write(const struct iproc_pll *pll, void __iomem *base,
			    const u32 offset, u32 val)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	writel(val, base + offset);

	if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK &&
		     (base == pll->status_base || base == pll->control_base)))
		val = readl(base + offset);
}

static void __pll_disable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val &= ~(1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->control_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* latch input value so core power can be shut down */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= 1 << ctrl->aon.iso_shift;
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);

		/* power down the core */
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}
}

static int __pll_enable(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;

	if (ctrl->flags & IPROC_CLK_EMBED_PWRCTRL) {
		val = readl(pll->control_base + ctrl->aon.offset);
		val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift);
		iproc_pll_write(pll, pll->control_base, ctrl->aon.offset, val);
	}

	if (pll->pwr_base) {
		/* power up the PLL and make sure it's not latched */
		val = readl(pll->pwr_base + ctrl->aon.offset);
		val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift;
		val &= ~(1 << ctrl->aon.iso_shift);
		iproc_pll_write(pll, pll->pwr_base, ctrl->aon.offset, val);
	}

	/* certain PLLs also need to be ungated from the ASIU top level */
	if (ctrl->flags & IPROC_CLK_PLL_ASIU) {
		val = readl(pll->asiu_base + ctrl->asiu.offset);
		val |= (1 << ctrl->asiu.en_shift);
		iproc_pll_write(pll, pll->asiu_base, ctrl->asiu.offset, val);
	}

	return 0;
}

static void __pll_put_in_reset(struct iproc_pll *pll)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;

	val = readl(pll->control_base + reset->offset);
	val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift);
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
				  unsigned int ka, unsigned int ki)
{
	u32 val;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	const struct iproc_pll_reset_ctrl *reset = &ctrl->reset;
	const struct iproc_pll_dig_filter_ctrl *dig_filter = &ctrl->dig_filter;

	val = readl(pll->control_base + dig_filter->offset);
	val &= ~(bit_mask(dig_filter->ki_width) << dig_filter->ki_shift |
		 bit_mask(dig_filter->kp_width) << dig_filter->kp_shift |
		 bit_mask(dig_filter->ka_width) << dig_filter->ka_shift);
	val |= ki << dig_filter->ki_shift | kp << dig_filter->kp_shift |
	       ka << dig_filter->ka_shift;
	iproc_pll_write(pll, pll->control_base, dig_filter->offset, val);

	val = readl(pll->control_base + reset->offset);
	val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift;
	iproc_pll_write(pll, pll->control_base, reset->offset, val);
}

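/*
 * Program the PLL to a new VCO rate: enable and power up the PLL, hold it
 * in reset, write the loop-filter gains (ki/kp/ka) and the NDIV/PDIV
 * dividers, then release reset and poll for lock.
 */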
static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
			unsigned long parent_rate)
{
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	int ka = 0, ki, kp, ret;
	unsigned long rate = vco->rate;
	u32 val;
	enum kp_band kp_index;
	unsigned long ref_freq;

	/*
	 * reference frequency = parent frequency / PDIV
	 * If PDIV = 0, then it becomes a multiplier (x2)
	 */
	if (vco->pdiv == 0)
		ref_freq = parent_rate * 2;
	else
		ref_freq = parent_rate / vco->pdiv;

	/* determine Ki and Kp index based on target VCO frequency */
	if (rate >= VCO_LOW && rate < VCO_HIGH) {
		ki = 4;
		kp_index = KP_BAND_MID;
	} else if (rate >= VCO_HIGH && rate < VCO_HIGH_HIGH) {
		ki = 3;
		kp_index = KP_BAND_HIGH;
	} else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) {
		ki = 3;
		kp_index = KP_BAND_HIGH_HIGH;
	} else {
		pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
		       clk->name, rate);
		return -EINVAL;
	}

	kp = get_kp(ref_freq, kp_index);
	if (kp < 0) {
		pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
		return kp;
	}

	ret = __pll_enable(pll);
	if (ret) {
		pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
		return ret;
	}

	/* put PLL in reset */
	__pll_put_in_reset(pll);

	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.u_offset, 0);

	val = readl(pll->control_base + ctrl->vco_ctrl.l_offset);

	if (rate >= VCO_LOW && rate < VCO_MID)
		val |= (1 << PLL_VCO_LOW_SHIFT);

	if (rate < VCO_HIGH)
		val &= ~(1 << PLL_VCO_HIGH_SHIFT);
	else
		val |= (1 << PLL_VCO_HIGH_SHIFT);

	iproc_pll_write(pll, pll->control_base, ctrl->vco_ctrl.l_offset, val);

	/* program integer part of NDIV */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift);
	val |= vco->ndiv_int << ctrl->ndiv_int.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->ndiv_int.offset, val);

	/* program fractional part of NDIV */
	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
			 ctrl->ndiv_frac.shift);
		val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
		iproc_pll_write(pll, pll->control_base, ctrl->ndiv_frac.offset,
				val);
	}

	/* program PDIV */
	val = readl(pll->control_base + ctrl->pdiv.offset);
	val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift);
	val |= vco->pdiv << ctrl->pdiv.shift;
	iproc_pll_write(pll, pll->control_base, ctrl->pdiv.offset, val);

	__pll_bring_out_reset(pll, kp, ka, ki);

	ret = pll_wait_for_lock(pll);
	if (ret < 0) {
		pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
		return ret;
	}

	return 0;
}

static int iproc_pll_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	return __pll_enable(pll);
}

static void iproc_pll_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	__pll_disable(pll);
}

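/*
 * Example of the rate calculation below: with ndiv_int = 40, ndiv_frac = 0
 * and pdiv = 1 on a 25 MHz reference, the PLL output is
 * (40 + 0 / 2^20) * (25000000 / 1) = 1000000000 Hz (1 GHz).
 */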
static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;
	u32 val;
	u64 ndiv, ndiv_int, ndiv_frac;
	unsigned int pdiv;

	if (parent_rate == 0)
		return 0;

	/* PLL needs to be locked */
	val = readl(pll->status_base + ctrl->status.offset);
	if ((val & (1 << ctrl->status.shift)) == 0) {
		clk->rate = 0;
		return 0;
	}

	/*
	 * PLL output frequency =
	 *
	 * (ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv)
	 */
	val = readl(pll->control_base + ctrl->ndiv_int.offset);
	ndiv_int = (val >> ctrl->ndiv_int.shift) &
		   bit_mask(ctrl->ndiv_int.width);
	ndiv = ndiv_int << 20;

	if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
		val = readl(pll->control_base + ctrl->ndiv_frac.offset);
		ndiv_frac = (val >> ctrl->ndiv_frac.shift) &
			    bit_mask(ctrl->ndiv_frac.width);
		ndiv += ndiv_frac;
	}

	val = readl(pll->control_base + ctrl->pdiv.offset);
	pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);

	clk->rate = (ndiv * parent_rate) >> 20;

	if (pdiv == 0)
		clk->rate *= 2;
	else
		clk->rate /= pdiv;

	return clk->rate;
}

static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	unsigned int i;
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;

	if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
		return -EINVAL;

	for (i = 0; i < pll->num_vco_entries; i++) {
		if (rate <= pll->vco_param[i].rate)
			break;
	}

	if (i == pll->num_vco_entries)
		i--;

	return pll->vco_param[i].rate;
}

static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	struct iproc_pll *pll = clk->pll;
	int rate_index, ret;

	rate_index = pll_get_rate_index(pll, rate);
	if (rate_index < 0)
		return rate_index;

	ret = pll_set_rate(clk, rate_index, parent_rate);
	return ret;
}

static const struct clk_ops iproc_pll_ops = {
	.enable = iproc_pll_enable,
	.disable = iproc_pll_disable,
	.recalc_rate = iproc_pll_recalc_rate,
	.round_rate = iproc_pll_round_rate,
	.set_rate = iproc_pll_set_rate,
};

static int iproc_clk_enable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	/* channel enable is active low */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.enable_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	/* also make sure channel is not held */
	val = readl(pll->control_base + ctrl->enable.offset);
	val &= ~(1 << ctrl->enable.hold_shift);
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);

	return 0;
}

static void iproc_clk_disable(struct clk_hw *hw)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;

	if (ctrl->flags & IPROC_CLK_AON)
		return;

	val = readl(pll->control_base + ctrl->enable.offset);
	val |= 1 << ctrl->enable.enable_shift;
	iproc_pll_write(pll, pll->control_base, ctrl->enable.offset, val);
}

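/*
 * Leaf clock rate is the PLL rate divided by MDIV; an MDIV register value
 * of 0 means divide by 256. For example, a 1 GHz PLL with mdiv = 5 yields
 * a 200 MHz leaf clock.
 */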
static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int mdiv;

	if (parent_rate == 0)
		return 0;

	val = readl(pll->control_base + ctrl->mdiv.offset);
	mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width);
	if (mdiv == 0)
		mdiv = 256;

	clk->rate = parent_rate / mdiv;

	return clk->rate;
}

static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	unsigned int div;

	if (rate == 0 || *parent_rate == 0)
		return -EINVAL;

	if (rate == *parent_rate)
		return *parent_rate;

	div = DIV_ROUND_UP(*parent_rate, rate);
	if (div < 2)
		return *parent_rate;

	if (div > 256)
		div = 256;

	return *parent_rate / div;
}

static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct iproc_clk *clk = to_iproc_clk(hw);
	const struct iproc_clk_ctrl *ctrl = clk->ctrl;
	struct iproc_pll *pll = clk->pll;
	u32 val;
	unsigned int div;

	if (rate == 0 || parent_rate == 0)
		return -EINVAL;

	div = DIV_ROUND_UP(parent_rate, rate);
	if (div > 256)
		return -EINVAL;

	val = readl(pll->control_base + ctrl->mdiv.offset);
	if (div == 256) {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
	} else {
		val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift);
		val |= div << ctrl->mdiv.shift;
	}
	iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
	clk->rate = parent_rate / div;

	return 0;
}

static const struct clk_ops iproc_clk_ops = {
	.enable = iproc_clk_enable,
	.disable = iproc_clk_disable,
	.recalc_rate = iproc_clk_recalc_rate,
	.round_rate = iproc_clk_round_rate,
	.set_rate = iproc_clk_set_rate,
};

/*
 * Some PLLs require the PLL SW override bit to be set before changes can be
 * applied to the PLL
 */
static void iproc_pll_sw_cfg(struct iproc_pll *pll)
{
	const struct iproc_pll_ctrl *ctrl = pll->ctrl;

	if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) {
		u32 val;

		val = readl(pll->control_base + ctrl->sw_ctrl.offset);
		val |= BIT(ctrl->sw_ctrl.shift);
		iproc_pll_write(pll, pll->control_base, ctrl->sw_ctrl.offset,
				val);
	}
}

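/*
 * Register a PLL and its leaf clocks from device tree. Register index 0 is
 * the PLL control block, index 1 the optional power/ISO block, and index 2
 * either the ASIU gate or the split status block, depending on the PLL
 * flags. Leaf clock names come from the "clock-output-names" property.
 */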
void __init iproc_pll_clk_setup(struct device_node *node,
				const struct iproc_pll_ctrl *pll_ctrl,
				const struct iproc_pll_vco_param *vco,
				unsigned int num_vco_entries,
				const struct iproc_clk_ctrl *clk_ctrl,
				unsigned int num_clks)
{
	int i, ret;
	struct clk *clk;
	struct iproc_pll *pll;
	struct iproc_clk *iclk;
	struct clk_init_data init;
	const char *parent_name;

	if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
		return;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (WARN_ON(!pll))
		return;

	pll->clk_data.clk_num = num_clks;
	pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks),
				     GFP_KERNEL);
	if (WARN_ON(!pll->clk_data.clks))
		goto err_clk_data;

	pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
	if (WARN_ON(!pll->clks))
		goto err_clks;

	pll->control_base = of_iomap(node, 0);
	if (WARN_ON(!pll->control_base))
		goto err_pll_iomap;

	/* Some SoCs do not require the pwr_base, thus failing is not fatal */
	pll->pwr_base = of_iomap(node, 1);

	/* some PLLs require gating control at the top ASIU level */
	if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) {
		pll->asiu_base = of_iomap(node, 2);
		if (WARN_ON(!pll->asiu_base))
			goto err_asiu_iomap;
	}

	if (pll_ctrl->flags & IPROC_CLK_PLL_SPLIT_STAT_CTRL) {
		/* Some SoCs have a split status/control. If this does not
		 * exist, assume they are unified.
		 */
		pll->status_base = of_iomap(node, 2);
		if (!pll->status_base)
			goto err_status_iomap;
	} else
		pll->status_base = pll->control_base;

	/* initialize and register the PLL itself */
	pll->ctrl = pll_ctrl;

	iclk = &pll->clks[0];
	iclk->pll = pll;
	iclk->name = node->name;

	init.name = node->name;
	init.ops = &iproc_pll_ops;
	init.flags = 0;
	parent_name = of_clk_get_parent_name(node, 0);
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);
	iclk->hw.init = &init;

	if (vco) {
		pll->num_vco_entries = num_vco_entries;
		pll->vco_param = vco;
	}

	iproc_pll_sw_cfg(pll);

	clk = clk_register(NULL, &iclk->hw);
	if (WARN_ON(IS_ERR(clk)))
		goto err_pll_register;

	pll->clk_data.clks[0] = clk;

	/* now initialize and register all leaf clocks */
	for (i = 1; i < num_clks; i++) {
		const char *clk_name;

		memset(&init, 0, sizeof(init));
		parent_name = node->name;

		ret = of_property_read_string_index(node, "clock-output-names",
						    i, &clk_name);
		if (WARN_ON(ret))
			goto err_clk_register;

		iclk = &pll->clks[i];
		iclk->name = clk_name;
		iclk->pll = pll;
		iclk->ctrl = &clk_ctrl[i];

		init.name = clk_name;
		init.ops = &iproc_clk_ops;
		init.flags = 0;
		init.parent_names = (parent_name ? &parent_name : NULL);
		init.num_parents = (parent_name ? 1 : 0);
		iclk->hw.init = &init;

		clk = clk_register(NULL, &iclk->hw);
		if (WARN_ON(IS_ERR(clk)))
			goto err_clk_register;

		pll->clk_data.clks[i] = clk;
	}

	ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data);
	if (WARN_ON(ret))
		goto err_clk_register;

	return;

err_clk_register:
	for (i = 0; i < num_clks; i++)
		clk_unregister(pll->clk_data.clks[i]);

err_pll_register:
	if (pll->status_base != pll->control_base)
		iounmap(pll->status_base);

err_status_iomap:
	if (pll->asiu_base)
		iounmap(pll->asiu_base);

err_asiu_iomap:
	if (pll->pwr_base)
		iounmap(pll->pwr_base);

	iounmap(pll->control_base);

err_pll_iomap:
	kfree(pll->clks);

err_clks:
	kfree(pll->clk_data.clks);

err_clk_data:
	kfree(pll);
}
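
/*
 * Typical use from a SoC-specific iProc clock file (illustrative only; the
 * "foo" identifiers below are placeholders for the SoC's own control
 * tables):
 *
 *	static void __init foo_genpll_clk_init(struct device_node *node)
 *	{
 *		iproc_pll_clk_setup(node, &foo_genpll, NULL, 0,
 *				    foo_genpll_clk,
 *				    ARRAY_SIZE(foo_genpll_clk));
 *	}
 *	CLK_OF_DECLARE(foo_genpll, "brcm,foo-genpll", foo_genpll_clk_init);
 */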