/*
 * OMAP3/4 - specific DPLL control functions
 *
 * Copyright (C) 2009-2010 Texas Instruments, Inc.
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Written by Paul Walmsley
 * Testing and integration fixes by Jouni Högander
 *
 * 36xx support added by Vishwanath BS, Richard Woodruff, and Nishanth
 * Menon
 *
 * Parts of this code are based on code written by
 * Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/clkdev.h>
#include <linux/clk/ti.h>

#include "clock.h"

/* CM_AUTOIDLE_PLL*.AUTO_* bit values */
#define DPLL_AUTOIDLE_DISABLE			0x0
#define DPLL_AUTOIDLE_LOW_POWER_STOP		0x1

#define MAX_DPLL_WAIT_TRIES		1000000

#define OMAP3XXX_EN_DPLL_LOCKED		0x7

/* Forward declarations */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk);
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk);
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk);

/* Private functions */

/* _omap3_dpll_write_clken - write clken_bits arg to a DPLL's enable bits */
static void _omap3_dpll_write_clken(struct clk_hw_omap *clk, u8 clken_bits)
{
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	v &= ~dd->enable_mask;
	v |= clken_bits << __ffs(dd->enable_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
}

/* _omap3_wait_dpll_status: wait for a DPLL to enter a specific state */
static int _omap3_wait_dpll_status(struct clk_hw_omap *clk, u8 state)
{
	const struct dpll_data *dd;
	int i = 0;
	int ret = -EINVAL;
	const char *clk_name;

	dd = clk->dpll_data;
	clk_name = clk_hw_get_name(&clk->hw);

	state <<= __ffs(dd->idlest_mask);

	while (((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask)
		!= state) && i < MAX_DPLL_WAIT_TRIES) {
		i++;
		udelay(1);
	}

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("clock: %s failed transition to '%s'\n",
		       clk_name, (state) ? "locked" : "bypassed");
	} else {
		pr_debug("clock: %s transition to '%s' in %d loops\n",
			 clk_name, (state) ? "locked" : "bypassed", i);

		ret = 0;
	}

	return ret;
}

/* From 3430 TRM ES2 4.7.6.2 */
static u16 _omap3_dpll_compute_freqsel(struct clk_hw_omap *clk, u8 n)
{
	unsigned long fint;
	u16 f = 0;

	fint = clk_hw_get_rate(clk->dpll_data->clk_ref) / n;

	pr_debug("clock: fint is %lu\n", fint);

	if (fint >= 750000 && fint <= 1000000)
		f = 0x3;
	else if (fint > 1000000 && fint <= 1250000)
		f = 0x4;
	else if (fint > 1250000 && fint <= 1500000)
		f = 0x5;
	else if (fint > 1500000 && fint <= 1750000)
		f = 0x6;
	else if (fint > 1750000 && fint <= 2100000)
		f = 0x7;
	else if (fint > 7500000 && fint <= 10000000)
		f = 0xB;
	else if (fint > 10000000 && fint <= 12500000)
		f = 0xC;
	else if (fint > 12500000 && fint <= 15000000)
		f = 0xD;
	else if (fint > 15000000 && fint <= 17500000)
		f = 0xE;
	else if (fint > 17500000 && fint <= 21000000)
		f = 0xF;
	else
		pr_debug("clock: unknown freqsel setting for %d\n", n);

	return f;
}

/*
 * _omap3_noncore_dpll_lock - instruct a DPLL to lock and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to lock.  Waits for the DPLL to report
 * readiness before returning.  Will save and restore the DPLL's
 * autoidle state across the enable, per the CDP code.  If the DPLL
 * locked successfully, return 0; if the DPLL did not lock in the time
 * allotted, or DPLL3 was passed in, return -EINVAL.
 */
static int _omap3_noncore_dpll_lock(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u8 ai;
	u8 state = 1;
	int r = 0;

	pr_debug("clock: locking DPLL %s\n", clk_hw_get_name(&clk->hw));

	dd = clk->dpll_data;
	state <<= __ffs(dd->idlest_mask);

	/* Check if already locked */
	if ((ti_clk_ll_ops->clk_readl(&dd->idlest_reg) & dd->idlest_mask) ==
	    state)
		goto done;

	ai = omap3_dpll_autoidle_read(clk);

	if (ai)
		omap3_dpll_deny_idle(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOCKED);

	r = _omap3_wait_dpll_status(clk, 1);

	if (ai)
		omap3_dpll_allow_idle(clk);

done:
	return r;
}

/*
 * _omap3_noncore_dpll_bypass - instruct a DPLL to bypass and wait for readiness
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power bypass mode.  In
 * bypass mode, the DPLL's rate is set equal to its parent clock's
 * rate.  Waits for the DPLL to report readiness before returning.
 * Will save and restore the DPLL's autoidle state across the enable,
 * per the CDP code.  If the DPLL entered bypass mode successfully,
 * return 0; if the DPLL did not enter bypass in the time allotted, or
 * DPLL3 was passed in, or the DPLL does not support low-power bypass,
 * return -EINVAL.
 */
static int _omap3_noncore_dpll_bypass(struct clk_hw_omap *clk)
{
	int r;
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_BYPASS)))
		return -EINVAL;

	pr_debug("clock: configuring DPLL %s for low-power bypass\n",
		 clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_BYPASS);

	r = _omap3_wait_dpll_status(clk, 0);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return r;
}

/*
 * _omap3_noncore_dpll_stop - instruct a DPLL to stop
 * @clk: pointer to a DPLL struct clk
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  Will save and
 * restore the DPLL's autoidle state across the stop, per the CDP
 * code.  If DPLL3 was passed in, or the DPLL does not support
 * low-power stop, return -EINVAL; otherwise, return 0.
 */
static int _omap3_noncore_dpll_stop(struct clk_hw_omap *clk)
{
	u8 ai;

	if (!(clk->dpll_data->modes & (1 << DPLL_LOW_POWER_STOP)))
		return -EINVAL;

	pr_debug("clock: stopping DPLL %s\n", clk_hw_get_name(&clk->hw));

	ai = omap3_dpll_autoidle_read(clk);

	_omap3_dpll_write_clken(clk, DPLL_LOW_POWER_STOP);

	if (ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}

/**
 * _lookup_dco - Lookup DCO used by j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @dco: digital control oscillator selector
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_dco(struct clk_hw_omap *clk, u8 *dco, u16 m, u8 n)
{
	unsigned long fint, clkinp; /* watch out for overflow */

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));
	fint = (clkinp / n) * m;

	if (fint < 1000000000)
		*dco = 2;
	else
		*dco = 4;
}

/**
 * _lookup_sddiv - Calculate sigma delta divider for j-type DPLL
 * @clk: pointer to a DPLL struct clk
 * @sd_div: target sigma-delta divider
 * @m: DPLL multiplier to set
 * @n: DPLL divider to set
 *
 * See 36xx TRM section 3.5.3.3.3.2 "Type B DPLL (Low-Jitter)"
 *
 * XXX This code is not needed for 3430/AM35xx; can it be optimized
 * out in non-multi-OMAP builds for those chips?
 */
static void _lookup_sddiv(struct clk_hw_omap *clk, u8 *sd_div, u16 m, u8 n)
{
	unsigned long clkinp, sd; /* watch out for overflow */
	int mod1, mod2;

	clkinp = clk_hw_get_rate(clk_hw_get_parent(&clk->hw));

	/*
	 * target sigma-delta to near 250MHz
	 * sd = ceil[(m/(n+1)) * (clkinp_MHz / 250)]
	 */
	clkinp /= 100000;	/* shift from MHz to 10*Hz for 38.4 and 19.2 */
	mod1 = (clkinp * m) % (250 * n);
	sd = (clkinp * m) / (250 * n);
	mod2 = sd % 10;
	sd /= 10;

	if (mod1 || mod2)
		sd++;
	*sd_div = sd;
}

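/*
 * Illustrative example for the fixed-point math above (values chosen for
 * illustration only): with clkinp = 38.4 MHz, m = 25 and n = 1, clkinp
 * becomes 384 (units of 0.1 MHz); (384 * 25) / (250 * 1) = 38 with a
 * remainder, and 38 / 10 = 3 with a remainder, so sd rounds up to 4.
 * This matches ceil(25 * 38.4 / 250) = ceil(3.84) = 4.
 */
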
/*
 * omap3_noncore_dpll_program - set non-core DPLL M,N values directly
 * @clk: struct clk * of DPLL to set
 * @freqsel: FREQSEL value to set
 *
 * Program the DPLL with the last M, N values calculated, and wait for
 * the DPLL to lock.  Returns -EINVAL upon error, or 0 upon success.
 */
static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel)
{
	struct dpll_data *dd = clk->dpll_data;
	u8 dco, sd_div, ai = 0;
	u32 v;
	bool errata_i810;

	/* 3430 ES2 TRM: 4.7.6.9 DPLL Programming Sequence */
	_omap3_noncore_dpll_bypass(clk);

	/*
	 * Set jitter correction. Jitter correction applicable for OMAP343X
	 * only since freqsel field is no longer present on other devices.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
		v &= ~dd->freqsel_mask;
		v |= freqsel << __ffs(dd->freqsel_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* Set DPLL multiplier, divider */
	v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	/* Handle Duty Cycle Correction */
	if (dd->dcc_mask) {
		if (dd->last_rounded_rate >= dd->dcc_rate)
			v |= dd->dcc_mask; /* Enable DCC */
		else
			v &= ~dd->dcc_mask; /* Disable DCC */
	}

	v &= ~(dd->mult_mask | dd->div1_mask);
	v |= dd->last_rounded_m << __ffs(dd->mult_mask);
	v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);

	/* Configure dco and sd_div for dplls that have these fields */
	if (dd->dco_mask) {
		_lookup_dco(clk, &dco, dd->last_rounded_m, dd->last_rounded_n);
		v &= ~(dd->dco_mask);
		v |= dco << __ffs(dd->dco_mask);
	}
	if (dd->sddiv_mask) {
		_lookup_sddiv(clk, &sd_div, dd->last_rounded_m,
			      dd->last_rounded_n);
		v &= ~(dd->sddiv_mask);
		v |= sd_div << __ffs(dd->sddiv_mask);
	}

	/*
	 * Errata i810 - DPLL controller can get stuck while transitioning
	 * to a power saving state. Software must ensure the DPLL can not
	 * transition to a low power state while changing M/N values.
	 * Easiest way to accomplish this is to prevent DPLL autoidle
	 * before doing the M/N re-program.
	 */
	errata_i810 = ti_clk_get_features()->flags & TI_CLK_ERRATA_I810;

	if (errata_i810) {
		ai = omap3_dpll_autoidle_read(clk);
		if (ai) {
			omap3_dpll_deny_idle(clk);

			/* OCP barrier */
			omap3_dpll_autoidle_read(clk);
		}
	}

	ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

	/* Set 4X multiplier and low-power mode */
	if (dd->m4xen_mask || dd->lpmode_mask) {
		v = ti_clk_ll_ops->clk_readl(&dd->control_reg);

		if (dd->m4xen_mask) {
			if (dd->last_rounded_m4xen)
				v |= dd->m4xen_mask;
			else
				v &= ~dd->m4xen_mask;
		}

		if (dd->lpmode_mask) {
			if (dd->last_rounded_lpmode)
				v |= dd->lpmode_mask;
			else
				v &= ~dd->lpmode_mask;
		}

		ti_clk_ll_ops->clk_writel(v, &dd->control_reg);
	}

	/* We let the clock framework set the other output dividers later */

	/* REVISIT: Set ramp-up delay? */

	_omap3_noncore_dpll_lock(clk);

	if (errata_i810 && ai)
		omap3_dpll_allow_idle(clk);

	return 0;
}

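/*
 * Note: the function above follows the TRM programming recipe referenced
 * at its top: drop the DPLL into bypass, optionally set FREQSEL, write the
 * M/N (and DCC/DCO/SDDIV) fields with autoidle held off when errata i810
 * applies, update the M4XEN/LPMODE bits where present, then relock.
 */
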
/* Public functions */

/**
 * omap3_dpll_recalc - recalculate DPLL rate
 * @hw: struct clk_hw of the DPLL
 * @parent_rate: rate of the parent clock
 *
 * Recalculate and propagate the DPLL rate.
 */
unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	return omap2_get_dpll_rate(clk);
}

/* Non-CORE DPLL (e.g., DPLLs that do not control SDRC) clock functions */

/**
 * omap3_noncore_dpll_enable - instruct a DPLL to enter bypass or lock mode
 * @hw: struct clk_hw of the DPLL
 *
 * Instructs a non-CORE DPLL to enable, e.g., to enter bypass or lock.
 * The choice of modes depends on the DPLL's programmed rate: if it is
 * the same as the DPLL's parent clock, it will enter bypass;
 * otherwise, it will enter lock.  This code will wait for the DPLL to
 * indicate readiness before returning, unless the DPLL takes too long
 * to enter the target state.  Intended to be used as the struct clk's
 * enable function.  If DPLL3 was passed in, or the DPLL does not
 * support low-power stop, or if the DPLL took too long to enter
 * bypass or lock, return -EINVAL; otherwise, return 0.
 */
int omap3_noncore_dpll_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int r;
	struct dpll_data *dd;
	struct clk_hw *parent;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk->clkdm) {
		r = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
		if (r) {
			WARN(1,
			     "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, clk_hw_get_name(hw),
			     clk->clkdm_name, r);
			return r;
		}
	}

	parent = clk_hw_get_parent(hw);

	if (clk_hw_get_rate(hw) == clk_hw_get_rate(dd->clk_bypass)) {
		WARN_ON(parent != dd->clk_bypass);
		r = _omap3_noncore_dpll_bypass(clk);
	} else {
		WARN_ON(parent != dd->clk_ref);
		r = _omap3_noncore_dpll_lock(clk);
	}

	return r;
}

/**
 * omap3_noncore_dpll_disable - instruct a DPLL to enter low-power stop
 * @hw: struct clk_hw of the DPLL
 *
 * Instructs a non-CORE DPLL to enter low-power stop.  This function is
 * intended for use in struct clkops.  No return value.
 */
void omap3_noncore_dpll_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);

	_omap3_noncore_dpll_stop(clk);
	if (clk->clkdm)
		ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}

/* Non-CORE DPLL rate set code */

/**
 * omap3_noncore_dpll_determine_rate - determine rate for a DPLL
 * @hw: pointer to the clock to determine rate for
 * @req: target rate request
 *
 * Determines which DPLL mode to use for reaching a desired target rate.
 * Checks whether the DPLL shall be in bypass or locked mode, and if
 * locked, calculates the M,N values for the DPLL via round-rate.
 * Returns 0 on success, a negative error value on failure.
 */
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;

	if (!req->rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_rate(dd->clk_bypass) == req->rate &&
	    (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
		req->best_parent_hw = dd->clk_bypass;
	} else {
		req->rate = omap2_dpll_round_rate(hw, req->rate,
						  &req->best_parent_rate);
		req->best_parent_hw = dd->clk_ref;
	}

	req->best_parent_rate = req->rate;

	return 0;
}

/**
 * omap3_noncore_dpll_set_parent - set parent for a DPLL clock
 * @hw: pointer to the clock to set parent for
 * @index: parent index to select
 *
 * Sets parent for a DPLL clock.  This sets the DPLL into bypass or
 * locked mode.  Returns 0 on success, a negative error value otherwise.
 */
int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	int ret;

	if (!hw)
		return -EINVAL;

	if (index)
		ret = _omap3_noncore_dpll_bypass(clk);
	else
		ret = _omap3_noncore_dpll_lock(clk);

	return ret;
}

/**
 * omap3_noncore_dpll_set_rate - set rate for a DPLL clock
 * @hw: pointer to the clock to set rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the parent clock
 *
 * Sets rate for a DPLL clock.  First checks that the clock parent is the
 * reference clock (in bypass mode, the rate of the clock can't be
 * changed), then proceeds with the rate change operation.  Returns 0
 * on success, a negative error value otherwise.
 */
int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u16 freqsel = 0;
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	dd = clk->dpll_data;
	if (!dd)
		return -EINVAL;

	if (clk_hw_get_parent(hw) != dd->clk_ref)
		return -EINVAL;

	if (dd->last_rounded_rate == 0)
		return -EINVAL;

	/* Freqsel is available only on OMAP343X devices */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL_HAS_FREQSEL) {
		freqsel = _omap3_dpll_compute_freqsel(clk, dd->last_rounded_n);
		WARN_ON(!freqsel);
	}

	pr_debug("%s: %s: set rate: locking rate to %lu.\n", __func__,
		 clk_hw_get_name(hw), rate);

	ret = omap3_noncore_dpll_program(clk, freqsel);

	return ret;
}

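/*
 * Note: omap3_noncore_dpll_set_rate() above depends on dd->last_rounded_m
 * and dd->last_rounded_n having been computed by an earlier round_rate or
 * determine_rate call; a request with dd->last_rounded_rate == 0 is
 * rejected with -EINVAL before any hardware is touched.
 */
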
/**
 * omap3_noncore_dpll_set_rate_and_parent - set rate and parent for a DPLL clock
 * @hw: pointer to the clock to set rate and parent for
 * @rate: target rate for the DPLL
 * @parent_rate: clock rate of the DPLL parent
 * @index: new parent index for the DPLL, 0 - reference, 1 - bypass
 *
 * Sets rate and parent for a DPLL clock.  If the new parent is the bypass
 * clock, only selects the parent.  Otherwise proceeds with a rate
 * change, as this will effectively also change the parent as the
 * DPLL is put into locked mode.  Returns 0 on success, a negative error
 * value otherwise.
 */
int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
					   unsigned long rate,
					   unsigned long parent_rate,
					   u8 index)
{
	int ret;

	if (!hw || !rate)
		return -EINVAL;

	/*
	 * clk-ref is at index[0], in which case we only need to set the
	 * rate; the parent is then changed automatically by the lock
	 * sequence.  In the clk-bypass case we only need to change the
	 * parent.
	 */
	if (index)
		ret = omap3_noncore_dpll_set_parent(hw, index);
	else
		ret = omap3_noncore_dpll_set_rate(hw, rate, parent_rate);

	return ret;
}

/* DPLL autoidle read/set code */

/**
 * omap3_dpll_autoidle_read - read a DPLL's autoidle bits
 * @clk: struct clk * of the DPLL to read
 *
 * Return the DPLL's autoidle bits, shifted down to bit 0.  Returns
 * -EINVAL if passed a null pointer or if the struct clk does not
 * appear to refer to a DPLL.
 */
static u32 omap3_dpll_autoidle_read(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return -EINVAL;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return -EINVAL;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= dd->autoidle_mask;
	v >>= __ffs(dd->autoidle_mask);

	return v;
}

/**
 * omap3_dpll_allow_idle - enable DPLL autoidle bits
 * @clk: struct clk * of the DPLL to operate on
 *
 * Enable DPLL automatic idle control.  This automatic idle mode
 * switching takes effect only when the DPLL is locked, at least on
 * OMAP3430.  The DPLL will enter low-power stop when its downstream
 * clocks are gated.  No return value.
 */
static void omap3_dpll_allow_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	/*
	 * REVISIT: CORE DPLL can optionally enter low-power bypass
	 * by writing 0x5 instead of 0x1.  Add some mechanism to
	 * optionally enter this mode.
	 */
	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_LOW_POWER_STOP << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}

/**
 * omap3_dpll_deny_idle - prevent DPLL from automatically idling
 * @clk: struct clk * of the DPLL to operate on
 *
 * Disable DPLL automatic idle control.  No return value.
 */
static void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
{
	const struct dpll_data *dd;
	u32 v;

	if (!clk || !clk->dpll_data)
		return;

	dd = clk->dpll_data;

	if (!dd->autoidle_mask)
		return;

	v = ti_clk_ll_ops->clk_readl(&dd->autoidle_reg);
	v &= ~dd->autoidle_mask;
	v |= DPLL_AUTOIDLE_DISABLE << __ffs(dd->autoidle_mask);
	ti_clk_ll_ops->clk_writel(v, &dd->autoidle_reg);
}

/* Clock control for DPLL outputs */

/* Find the parent DPLL for the given clkoutx2 clock */
static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
{
	struct clk_hw_omap *pclk = NULL;

	/* Walk up the parents of clk, looking for a DPLL */
	do {
		do {
			hw = clk_hw_get_parent(hw);
		} while (hw && (clk_hw_get_flags(hw) & CLK_IS_BASIC));
		if (!hw)
			break;
		pclk = to_clk_hw_omap(hw);
	} while (pclk && !pclk->dpll_data);

	/* clk does not have a DPLL as a parent?  error in the clock data */
	if (!pclk) {
		WARN_ON(1);
		return NULL;
	}

	return pclk;
}

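/*
 * The CLKOUTX2 recalc below applies the x2 factor only when the parent
 * DPLL is locked and is not a j-type DPLL; in bypass, or for j-type
 * DPLLs, the x2 output simply follows the parent rate.
 */
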
/**
 * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
 * @hw: pointer to the clkoutx2 struct clk_hw
 * @parent_rate: rate of the parent DPLL clock
 *
 * Using parent clock DPLL data, look up DPLL state.  If locked, set our
 * rate to the dpll_clk * 2; otherwise, just use dpll_clk.
 */
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate)
{
	const struct dpll_data *dd;
	unsigned long rate;
	u32 v;
	struct clk_hw_omap *pclk = NULL;

	if (!parent_rate)
		return 0;

	pclk = omap3_find_clkoutx2_dpll(hw);

	if (!pclk)
		return 0;

	dd = pclk->dpll_data;

	WARN_ON(!dd->enable_mask);

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg) & dd->enable_mask;
	v >>= __ffs(dd->enable_mask);
	if ((v != OMAP3XXX_EN_DPLL_LOCKED) || (dd->flags & DPLL_J_TYPE))
		rate = parent_rate;
	else
		rate = parent_rate * 2;
	return rate;
}

/**
 * omap3_core_dpll_save_context - Save the m and n values of the divider
 * @hw: pointer to a struct clk_hw
 *
 * Before the DPLL registers are lost, save the last rounded M and N
 * values and the enable mask.
 */
int omap3_core_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
						__ffs(dd->mult_mask);
		dd->last_rounded_n = ((v & dd->div1_mask) >>
						__ffs(dd->div1_mask)) + 1;
	}

	return 0;
}

/**
 * omap3_core_dpll_restore_context - restore the m and n values of the divider
 * @hw: pointer to a struct clk_hw
 *
 * Restore the last rounded M and N values and the enable mask.
 */
void omap3_core_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	if (clk->context == DPLL_LOCKED) {
		_omap3_dpll_write_clken(clk, 0x4);
		_omap3_wait_dpll_status(clk, 0);

		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		v &= ~(dd->mult_mask | dd->div1_mask);
		v |= dd->last_rounded_m << __ffs(dd->mult_mask);
		v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask);
		ti_clk_ll_ops->clk_writel(v, &dd->mult_div1_reg);

		_omap3_dpll_write_clken(clk, DPLL_LOCKED);
		_omap3_wait_dpll_status(clk, 1);
	} else {
		_omap3_dpll_write_clken(clk, clk->context);
	}
}

/**
 * omap3_noncore_dpll_save_context - Save the m and n values of the divider
 * @hw: pointer to a struct clk_hw
 *
 * Before the DPLL registers are lost, save the last rounded M and N
 * values and the enable mask.
 */
int omap3_noncore_dpll_save_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	u32 v;

	dd = clk->dpll_data;

	v = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	clk->context = (v & dd->enable_mask) >> __ffs(dd->enable_mask);

	if (clk->context == DPLL_LOCKED) {
		v = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);
		dd->last_rounded_m = (v & dd->mult_mask) >>
						__ffs(dd->mult_mask);
		dd->last_rounded_n = ((v & dd->div1_mask) >>
						__ffs(dd->div1_mask)) + 1;
	}

	return 0;
}

/**
 * omap3_noncore_dpll_restore_context - restore the m and n values of the divider
 * @hw: pointer to a struct clk_hw
 *
 * Restore the last rounded M and N values and the enable mask.
 */
void omap3_noncore_dpll_restore_context(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	const struct dpll_data *dd;
	u32 ctrl, mult_div1;

	dd = clk->dpll_data;

	ctrl = ti_clk_ll_ops->clk_readl(&dd->control_reg);
	mult_div1 = ti_clk_ll_ops->clk_readl(&dd->mult_div1_reg);

	if (clk->context == ((ctrl & dd->enable_mask) >>
			     __ffs(dd->enable_mask)) &&
	    dd->last_rounded_m == ((mult_div1 & dd->mult_mask) >>
				   __ffs(dd->mult_mask)) &&
	    dd->last_rounded_n == ((mult_div1 & dd->div1_mask) >>
				   __ffs(dd->div1_mask)) + 1) {
		/* nothing to be done */
		return;
	}

	if (clk->context == DPLL_LOCKED)
		omap3_noncore_dpll_program(clk, 0);
	else
		_omap3_dpll_write_clken(clk, clk->context);
}

/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
	.allow_idle = omap3_dpll_allow_idle,
	.deny_idle = omap3_dpll_deny_idle,
};

/**
 * omap3_dpll4_set_rate - set rate for omap3 per-dpll
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 *
 * Check if the current SoC supports the per-dpll reprogram operation
 * or not, and then do the rate change if supported.  Returns -EINVAL
 * if not supported, 0 for success, and potential error codes from the
 * clock rate change.
 */
int omap3_dpll4_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	/*
	 * According to the 12-5 CDP code from TI, "Limitation 2.5"
	 * on 3430ES1 prevents us from changing DPLL multipliers or dividers
	 * on DPLL4.
	 */
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}

/**
 * omap3_dpll4_set_rate_and_parent - set rate and parent for omap3 per-dpll
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 * @index: parent index, 0 - reference clock, 1 - bypass clock
 *
 * Check if the current SoC supports the per-dpll reprogram operation
 * or not, and then do the rate + parent change if supported.  Returns
 * -EINVAL if not supported, 0 for success, and potential error codes
 * from the clock rate change.
 */
int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate, u8 index)
{
	if (ti_clk_get_features()->flags & TI_CLK_DPLL4_DENY_REPROGRAM) {
		pr_err("clock: DPLL4 cannot change rate due to silicon 'Limitation 2.5' on 3430ES1.\n");
		return -EINVAL;
	}

	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
						      index);
}

/* Apply DM3730 errata sprz319 advisory 2.1. */
static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct omap3_dpll5_settings {
		unsigned int rate, m, n;
	};

	static const struct omap3_dpll5_settings precomputed[] = {
		/*
		 * From DM3730 errata advisory 2.1, table 35 and 36.
		 * The N value is increased by 1 compared to the tables as the
		 * errata lists register values while last_rounded_field is the
		 * real divider value.
		 */
		{ 12000000, 80,  0 + 1 },
		{ 13000000, 443, 5 + 1 },
		{ 19200000, 50,  0 + 1 },
		{ 26000000, 443, 11 + 1 },
		{ 38400000, 25,  0 + 1 }
	};

	const struct omap3_dpll5_settings *d;
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	struct dpll_data *dd;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
		if (parent_rate == precomputed[i].rate)
			break;
	}

	if (i == ARRAY_SIZE(precomputed))
		return false;

	d = &precomputed[i];

	/* Update the M, N and rounded rate values and program the DPLL. */
	dd = clk->dpll_data;
	dd->last_rounded_m = d->m;
	dd->last_rounded_n = d->n;
	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
	omap3_noncore_dpll_program(clk, 0);

	return true;
}

/**
 * omap3_dpll5_set_rate - set rate for omap3 dpll5
 * @hw: clock to change
 * @rate: target rate for clock
 * @parent_rate: rate of the parent clock
 *
 * Set rate for the DPLL5 clock.  Apply the sprz319 advisory 2.1 on OMAP36xx
 * if the DPLL is used for USB host (detected through the requested rate).
 */
int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
		if (omap3_dpll5_apply_errata(hw, parent_rate))
			return 0;
	}

	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
}