/*
 *  linux/arch/arm/mach-omap2/clock.c
 *
 *  Copyright (C) 2005-2008 Texas Instruments, Inc.
 *  Copyright (C) 2004-2008 Nokia Corporation
 *
 *  Contacts:
 *  Richard Woodruff <r-woodruff2@ti.com>
 *  Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <mach/clock.h>
#include <mach/clockdomain.h>
#include <mach/cpu.h>
#include <asm/div64.h>

#include <mach/sdrc.h>
#include "sdrc.h"
#include "clock.h"
#include "prm.h"
#include "prm-regbits-24xx.h"
#include "cm.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

/* Maximum number of 1-us loops omap2_wait_clock_ready() will spin */
#define MAX_CLOCK_ENABLE_WAIT		100000

/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		1
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		-1

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define DPLL_FINT_BAND1_MIN		750000
#define DPLL_FINT_BAND1_MAX		2100000
#define DPLL_FINT_BAND2_MIN		7500000
#define DPLL_FINT_BAND2_MAX		21000000

/* _dpll_test_fint() return codes */
#define DPLL_FINT_UNDERFLOW		-1
#define DPLL_FINT_INVALID		-2

/* RATE_IN_* bitmask for the currently-running chip; set during clock init */
u8 cpu_mask;

/*-------------------------------------------------------------------------
 * OMAP2/3 specific clock functions
 *-------------------------------------------------------------------------*/

/**
 * _omap2xxx_clk_commit - commit clock parent/rate changes in hardware
 * @clk: struct clk *
 *
 * If @clk has the DELAYED_APP flag set, meaning that parent/rate changes
 * don't take effect until the VALID_CONFIG bit is written, write the
 * VALID_CONFIG bit and wait for the write to complete.  No return value.
 * No-op on non-24xx chips.
 */
static void _omap2xxx_clk_commit(struct clk *clk)
{
	if (!cpu_is_omap24xx())
		return;

	if (!(clk->flags & DELAYED_APP))
		return;

	prm_write_mod_reg(OMAP24XX_VALID_CONFIG, OMAP24XX_GR_MOD,
			  OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
	/* OCP barrier: read back to ensure the write has reached the PRM */
	prm_read_mod_reg(OMAP24XX_GR_MOD, OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
}

/*
 * _dpll_test_fint - test whether an Fint value is valid for the DPLL
 * @clk: DPLL struct clk to test
 * @n: divider value (N) to test
 *
 * Tests whether a particular divider @n will result in a valid DPLL
 * internal clock frequency Fint.  See the 34xx TRM 4.7.6.2 "DPLL Jitter
 * Correction".  Returns 0 if OK, -1 if the enclosing loop can terminate
 * (assuming that it is counting N upwards), or -2 if the enclosing loop
 * should skip to the next iteration (again assuming N is increasing).
 * As a side effect, narrows dd->max_divider/dd->min_divider so future
 * searches skip known-bad dividers.
 */
static int _dpll_test_fint(struct clk *clk, u8 n)
{
	struct dpll_data *dd;
	long fint;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = clk->parent->rate / (n + 1);
	if (fint < DPLL_FINT_BAND1_MIN) {

		pr_debug("rejecting n=%d due to Fint failure, "
			 "lowering max_divider\n", n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;

	} else if (fint > DPLL_FINT_BAND1_MAX &&
		   fint < DPLL_FINT_BAND2_MIN) {

		/* Fint fell in the gap between the two valid bands */
		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;

	} else if (fint > DPLL_FINT_BAND2_MAX) {

		pr_debug("rejecting n=%d due to Fint failure, "
			 "boosting min_divider\n", n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;

	}

	return ret;
}

/**
 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
 * @clk: OMAP clock struct ptr to use
 *
 * Convert a clockdomain name stored in a struct clk 'clk' into a
 * clockdomain pointer, and save it into the struct clk.  Intended to be
 * called during clk_register().  No return value.
 */
void omap2_init_clk_clkdm(struct clk *clk)
{
	struct clockdomain *clkdm;

	if (!clk->clkdm_name)
		return;

	clkdm = clkdm_lookup(clk->clkdm_name);
	if (clkdm) {
		pr_debug("clock: associated clk %s to clkdm %s\n",
			 clk->name, clk->clkdm_name);
		clk->clkdm = clkdm;
	} else {
		pr_debug("clock: could not associate clk %s to "
			 "clkdm %s\n", clk->name, clk->clkdm_name);
	}
}

/**
 * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
 * @clk: OMAP clock struct ptr to use
 *
 * Given a pointer to a source-selectable struct clk, read the hardware
 * register and determine what its parent is currently set to.  Update the
 * clk->parent field with the appropriate clk ptr.
178 */ 179 void omap2_init_clksel_parent(struct clk *clk) 180 { 181 const struct clksel *clks; 182 const struct clksel_rate *clkr; 183 u32 r, found = 0; 184 185 if (!clk->clksel) 186 return; 187 188 r = __raw_readl(clk->clksel_reg) & clk->clksel_mask; 189 r >>= __ffs(clk->clksel_mask); 190 191 for (clks = clk->clksel; clks->parent && !found; clks++) { 192 for (clkr = clks->rates; clkr->div && !found; clkr++) { 193 if ((clkr->flags & cpu_mask) && (clkr->val == r)) { 194 if (clk->parent != clks->parent) { 195 pr_debug("clock: inited %s parent " 196 "to %s (was %s)\n", 197 clk->name, clks->parent->name, 198 ((clk->parent) ? 199 clk->parent->name : "NULL")); 200 clk_reparent(clk, clks->parent); 201 }; 202 found = 1; 203 } 204 } 205 } 206 207 if (!found) 208 printk(KERN_ERR "clock: init parent: could not find " 209 "regval %0x for clock %s\n", r, clk->name); 210 211 return; 212 } 213 214 /* Returns the DPLL rate */ 215 u32 omap2_get_dpll_rate(struct clk *clk) 216 { 217 long long dpll_clk; 218 u32 dpll_mult, dpll_div, dpll; 219 struct dpll_data *dd; 220 221 dd = clk->dpll_data; 222 /* REVISIT: What do we return on error? 
*/ 223 if (!dd) 224 return 0; 225 226 dpll = __raw_readl(dd->mult_div1_reg); 227 dpll_mult = dpll & dd->mult_mask; 228 dpll_mult >>= __ffs(dd->mult_mask); 229 dpll_div = dpll & dd->div1_mask; 230 dpll_div >>= __ffs(dd->div1_mask); 231 232 dpll_clk = (long long)clk->parent->rate * dpll_mult; 233 do_div(dpll_clk, dpll_div + 1); 234 235 return dpll_clk; 236 } 237 238 /* 239 * Used for clocks that have the same value as the parent clock, 240 * divided by some factor 241 */ 242 void omap2_fixed_divisor_recalc(struct clk *clk) 243 { 244 WARN_ON(!clk->fixed_div); 245 246 clk->rate = clk->parent->rate / clk->fixed_div; 247 } 248 249 /** 250 * omap2_wait_clock_ready - wait for clock to enable 251 * @reg: physical address of clock IDLEST register 252 * @mask: value to mask against to determine if the clock is active 253 * @name: name of the clock (for printk) 254 * 255 * Returns 1 if the clock enabled in time, or 0 if it failed to enable 256 * in roughly MAX_CLOCK_ENABLE_WAIT microseconds. 257 */ 258 int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name) 259 { 260 int i = 0; 261 int ena = 0; 262 263 /* 264 * 24xx uses 0 to indicate not ready, and 1 to indicate ready. 265 * 34xx reverses this, just to keep us on our toes 266 */ 267 if (cpu_mask & (RATE_IN_242X | RATE_IN_243X)) 268 ena = mask; 269 else if (cpu_mask & RATE_IN_343X) 270 ena = 0; 271 272 /* Wait for lock */ 273 while (((__raw_readl(reg) & mask) != ena) && 274 (i++ < MAX_CLOCK_ENABLE_WAIT)) { 275 udelay(1); 276 } 277 278 if (i < MAX_CLOCK_ENABLE_WAIT) 279 pr_debug("Clock %s stable after %d loops\n", name, i); 280 else 281 printk(KERN_ERR "Clock %s didn't enable in %d tries\n", 282 name, MAX_CLOCK_ENABLE_WAIT); 283 284 285 return (i < MAX_CLOCK_ENABLE_WAIT) ? 
1 : 0; 286 }; 287 288 289 /* 290 * Note: We don't need special code here for INVERT_ENABLE 291 * for the time being since INVERT_ENABLE only applies to clocks enabled by 292 * CM_CLKEN_PLL 293 */ 294 static void omap2_clk_wait_ready(struct clk *clk) 295 { 296 void __iomem *reg, *other_reg, *st_reg; 297 u32 bit; 298 299 /* 300 * REVISIT: This code is pretty ugly. It would be nice to generalize 301 * it and pull it into struct clk itself somehow. 302 */ 303 reg = clk->enable_reg; 304 305 /* 306 * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes 307 * it's just a matter of XORing the bits. 308 */ 309 other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN)); 310 311 /* Check if both functional and interface clocks 312 * are running. */ 313 bit = 1 << clk->enable_bit; 314 if (!(__raw_readl(other_reg) & bit)) 315 return; 316 st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */ 317 318 omap2_wait_clock_ready(st_reg, bit, clk->name); 319 } 320 321 static int omap2_dflt_clk_enable(struct clk *clk) 322 { 323 u32 v; 324 325 if (unlikely(clk->enable_reg == NULL)) { 326 printk(KERN_ERR "clock.c: Enable for %s without enable code\n", 327 clk->name); 328 return 0; /* REVISIT: -EINVAL */ 329 } 330 331 v = __raw_readl(clk->enable_reg); 332 if (clk->flags & INVERT_ENABLE) 333 v &= ~(1 << clk->enable_bit); 334 else 335 v |= (1 << clk->enable_bit); 336 __raw_writel(v, clk->enable_reg); 337 v = __raw_readl(clk->enable_reg); /* OCP barrier */ 338 339 return 0; 340 } 341 342 static int omap2_dflt_clk_enable_wait(struct clk *clk) 343 { 344 int ret; 345 346 if (!clk->enable_reg) { 347 printk(KERN_ERR "clock.c: Enable for %s without enable code\n", 348 clk->name); 349 return 0; /* REVISIT: -EINVAL */ 350 } 351 352 ret = omap2_dflt_clk_enable(clk); 353 if (ret == 0) 354 omap2_clk_wait_ready(clk); 355 return ret; 356 } 357 358 static void omap2_dflt_clk_disable(struct clk *clk) 359 { 360 u32 v; 361 362 if (!clk->enable_reg) { 363 /* 364 * 
'Independent' here refers to a clock which is not 365 * controlled by its parent. 366 */ 367 printk(KERN_ERR "clock: clk_disable called on independent " 368 "clock %s which has no enable_reg\n", clk->name); 369 return; 370 } 371 372 v = __raw_readl(clk->enable_reg); 373 if (clk->flags & INVERT_ENABLE) 374 v |= (1 << clk->enable_bit); 375 else 376 v &= ~(1 << clk->enable_bit); 377 __raw_writel(v, clk->enable_reg); 378 /* No OCP barrier needed here since it is a disable operation */ 379 } 380 381 const struct clkops clkops_omap2_dflt_wait = { 382 .enable = omap2_dflt_clk_enable_wait, 383 .disable = omap2_dflt_clk_disable, 384 }; 385 386 const struct clkops clkops_omap2_dflt = { 387 .enable = omap2_dflt_clk_enable, 388 .disable = omap2_dflt_clk_disable, 389 }; 390 391 /* Enables clock without considering parent dependencies or use count 392 * REVISIT: Maybe change this to use clk->enable like on omap1? 393 */ 394 static int _omap2_clk_enable(struct clk *clk) 395 { 396 return clk->ops->enable(clk); 397 } 398 399 /* Disables clock without considering parent dependencies or use count */ 400 static void _omap2_clk_disable(struct clk *clk) 401 { 402 clk->ops->disable(clk); 403 } 404 405 void omap2_clk_disable(struct clk *clk) 406 { 407 if (clk->usecount > 0 && !(--clk->usecount)) { 408 _omap2_clk_disable(clk); 409 if (clk->parent) 410 omap2_clk_disable(clk->parent); 411 if (clk->clkdm) 412 omap2_clkdm_clk_disable(clk->clkdm, clk); 413 414 } 415 } 416 417 int omap2_clk_enable(struct clk *clk) 418 { 419 int ret = 0; 420 421 if (clk->usecount++ == 0) { 422 if (clk->clkdm) 423 omap2_clkdm_clk_enable(clk->clkdm, clk); 424 425 if (clk->parent) { 426 ret = omap2_clk_enable(clk->parent); 427 if (ret) 428 goto err; 429 } 430 431 ret = _omap2_clk_enable(clk); 432 if (ret) { 433 if (clk->parent) 434 omap2_clk_disable(clk->parent); 435 436 goto err; 437 } 438 } 439 return ret; 440 441 err: 442 if (clk->clkdm) 443 omap2_clkdm_clk_disable(clk->clkdm, clk); 444 clk->usecount--; 445 
return ret; 446 } 447 448 /* 449 * Used for clocks that are part of CLKSEL_xyz governed clocks. 450 * REVISIT: Maybe change to use clk->enable() functions like on omap1? 451 */ 452 void omap2_clksel_recalc(struct clk *clk) 453 { 454 u32 div = 0; 455 456 pr_debug("clock: recalc'ing clksel clk %s\n", clk->name); 457 458 div = omap2_clksel_get_divisor(clk); 459 if (div == 0) 460 return; 461 462 if (clk->rate == (clk->parent->rate / div)) 463 return; 464 clk->rate = clk->parent->rate / div; 465 466 pr_debug("clock: new clock rate is %ld (div %d)\n", clk->rate, div); 467 } 468 469 /** 470 * omap2_get_clksel_by_parent - return clksel struct for a given clk & parent 471 * @clk: OMAP struct clk ptr to inspect 472 * @src_clk: OMAP struct clk ptr of the parent clk to search for 473 * 474 * Scan the struct clksel array associated with the clock to find 475 * the element associated with the supplied parent clock address. 476 * Returns a pointer to the struct clksel on success or NULL on error. 477 */ 478 static const struct clksel *omap2_get_clksel_by_parent(struct clk *clk, 479 struct clk *src_clk) 480 { 481 const struct clksel *clks; 482 483 if (!clk->clksel) 484 return NULL; 485 486 for (clks = clk->clksel; clks->parent; clks++) { 487 if (clks->parent == src_clk) 488 break; /* Found the requested parent */ 489 } 490 491 if (!clks->parent) { 492 printk(KERN_ERR "clock: Could not find parent clock %s in " 493 "clksel array of clock %s\n", src_clk->name, 494 clk->name); 495 return NULL; 496 } 497 498 return clks; 499 } 500 501 /** 502 * omap2_clksel_round_rate_div - find divisor for the given clock and rate 503 * @clk: OMAP struct clk to use 504 * @target_rate: desired clock rate 505 * @new_div: ptr to where we should store the divisor 506 * 507 * Finds 'best' divider value in an array based on the source and target 508 * rates. The divider array must be sorted with smallest divider first. 
509 * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT, 510 * they are only settable as part of virtual_prcm set. 511 * 512 * Returns the rounded clock rate or returns 0xffffffff on error. 513 */ 514 u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate, 515 u32 *new_div) 516 { 517 unsigned long test_rate; 518 const struct clksel *clks; 519 const struct clksel_rate *clkr; 520 u32 last_div = 0; 521 522 printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n", 523 clk->name, target_rate); 524 525 *new_div = 1; 526 527 clks = omap2_get_clksel_by_parent(clk, clk->parent); 528 if (!clks) 529 return ~0; 530 531 for (clkr = clks->rates; clkr->div; clkr++) { 532 if (!(clkr->flags & cpu_mask)) 533 continue; 534 535 /* Sanity check */ 536 if (clkr->div <= last_div) 537 printk(KERN_ERR "clock: clksel_rate table not sorted " 538 "for clock %s", clk->name); 539 540 last_div = clkr->div; 541 542 test_rate = clk->parent->rate / clkr->div; 543 544 if (test_rate <= target_rate) 545 break; /* found it */ 546 } 547 548 if (!clkr->div) { 549 printk(KERN_ERR "clock: Could not find divisor for target " 550 "rate %ld for clock %s parent %s\n", target_rate, 551 clk->name, clk->parent->name); 552 return ~0; 553 } 554 555 *new_div = clkr->div; 556 557 printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div, 558 (clk->parent->rate / clkr->div)); 559 560 return (clk->parent->rate / clkr->div); 561 } 562 563 /** 564 * omap2_clksel_round_rate - find rounded rate for the given clock and rate 565 * @clk: OMAP struct clk to use 566 * @target_rate: desired clock rate 567 * 568 * Compatibility wrapper for OMAP clock framework 569 * Finds best target rate based on the source clock and possible dividers. 570 * rates. The divider array must be sorted with smallest divider first. 571 * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT, 572 * they are only settable as part of virtual_prcm set. 
573 * 574 * Returns the rounded clock rate or returns 0xffffffff on error. 575 */ 576 long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate) 577 { 578 u32 new_div; 579 580 return omap2_clksel_round_rate_div(clk, target_rate, &new_div); 581 } 582 583 584 /* Given a clock and a rate apply a clock specific rounding function */ 585 long omap2_clk_round_rate(struct clk *clk, unsigned long rate) 586 { 587 if (clk->round_rate) 588 return clk->round_rate(clk, rate); 589 590 if (clk->flags & RATE_FIXED) 591 printk(KERN_ERR "clock: generic omap2_clk_round_rate called " 592 "on fixed-rate clock %s\n", clk->name); 593 594 return clk->rate; 595 } 596 597 /** 598 * omap2_clksel_to_divisor() - turn clksel field value into integer divider 599 * @clk: OMAP struct clk to use 600 * @field_val: register field value to find 601 * 602 * Given a struct clk of a rate-selectable clksel clock, and a register field 603 * value to search for, find the corresponding clock divisor. The register 604 * field value should be pre-masked and shifted down so the LSB is at bit 0 605 * before calling. 
Returns 0 on error 606 */ 607 u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val) 608 { 609 const struct clksel *clks; 610 const struct clksel_rate *clkr; 611 612 clks = omap2_get_clksel_by_parent(clk, clk->parent); 613 if (!clks) 614 return 0; 615 616 for (clkr = clks->rates; clkr->div; clkr++) { 617 if ((clkr->flags & cpu_mask) && (clkr->val == field_val)) 618 break; 619 } 620 621 if (!clkr->div) { 622 printk(KERN_ERR "clock: Could not find fieldval %d for " 623 "clock %s parent %s\n", field_val, clk->name, 624 clk->parent->name); 625 return 0; 626 } 627 628 return clkr->div; 629 } 630 631 /** 632 * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value 633 * @clk: OMAP struct clk to use 634 * @div: integer divisor to search for 635 * 636 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor, 637 * find the corresponding register field value. The return register value is 638 * the value before left-shifting. Returns 0xffffffff on error 639 */ 640 u32 omap2_divisor_to_clksel(struct clk *clk, u32 div) 641 { 642 const struct clksel *clks; 643 const struct clksel_rate *clkr; 644 645 /* should never happen */ 646 WARN_ON(div == 0); 647 648 clks = omap2_get_clksel_by_parent(clk, clk->parent); 649 if (!clks) 650 return 0; 651 652 for (clkr = clks->rates; clkr->div; clkr++) { 653 if ((clkr->flags & cpu_mask) && (clkr->div == div)) 654 break; 655 } 656 657 if (!clkr->div) { 658 printk(KERN_ERR "clock: Could not find divisor %d for " 659 "clock %s parent %s\n", div, clk->name, 660 clk->parent->name); 661 return 0; 662 } 663 664 return clkr->val; 665 } 666 667 /** 668 * omap2_clksel_get_divisor - get current divider applied to parent clock. 669 * @clk: OMAP struct clk to use. 670 * 671 * Returns the integer divisor upon success or 0 on error. 
672 */ 673 u32 omap2_clksel_get_divisor(struct clk *clk) 674 { 675 u32 v; 676 677 if (!clk->clksel_mask) 678 return 0; 679 680 v = __raw_readl(clk->clksel_reg) & clk->clksel_mask; 681 v >>= __ffs(clk->clksel_mask); 682 683 return omap2_clksel_to_divisor(clk, v); 684 } 685 686 int omap2_clksel_set_rate(struct clk *clk, unsigned long rate) 687 { 688 u32 v, field_val, validrate, new_div = 0; 689 690 if (!clk->clksel_mask) 691 return -EINVAL; 692 693 validrate = omap2_clksel_round_rate_div(clk, rate, &new_div); 694 if (validrate != rate) 695 return -EINVAL; 696 697 field_val = omap2_divisor_to_clksel(clk, new_div); 698 if (field_val == ~0) 699 return -EINVAL; 700 701 v = __raw_readl(clk->clksel_reg); 702 v &= ~clk->clksel_mask; 703 v |= field_val << __ffs(clk->clksel_mask); 704 __raw_writel(v, clk->clksel_reg); 705 v = __raw_readl(clk->clksel_reg); /* OCP barrier */ 706 707 clk->rate = clk->parent->rate / new_div; 708 709 _omap2xxx_clk_commit(clk); 710 711 return 0; 712 } 713 714 715 /* Set the clock rate for a clock source */ 716 int omap2_clk_set_rate(struct clk *clk, unsigned long rate) 717 { 718 int ret = -EINVAL; 719 720 pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate); 721 722 /* CONFIG_PARTICIPANT clocks are changed only in sets via the 723 rate table mechanism, driven by mpu_speed */ 724 if (clk->flags & CONFIG_PARTICIPANT) 725 return -EINVAL; 726 727 /* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */ 728 if (clk->set_rate) 729 ret = clk->set_rate(clk, rate); 730 731 return ret; 732 } 733 734 /* 735 * Converts encoded control register address into a full address 736 * On error, the return value (parent_div) will be 0. 
737 */ 738 static u32 _omap2_clksel_get_src_field(struct clk *src_clk, struct clk *clk, 739 u32 *field_val) 740 { 741 const struct clksel *clks; 742 const struct clksel_rate *clkr; 743 744 clks = omap2_get_clksel_by_parent(clk, src_clk); 745 if (!clks) 746 return 0; 747 748 for (clkr = clks->rates; clkr->div; clkr++) { 749 if (clkr->flags & (cpu_mask | DEFAULT_RATE)) 750 break; /* Found the default rate for this platform */ 751 } 752 753 if (!clkr->div) { 754 printk(KERN_ERR "clock: Could not find default rate for " 755 "clock %s parent %s\n", clk->name, 756 src_clk->parent->name); 757 return 0; 758 } 759 760 /* Should never happen. Add a clksel mask to the struct clk. */ 761 WARN_ON(clk->clksel_mask == 0); 762 763 *field_val = clkr->val; 764 765 return clkr->div; 766 } 767 768 int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent) 769 { 770 u32 field_val, v, parent_div; 771 772 if (clk->flags & CONFIG_PARTICIPANT) 773 return -EINVAL; 774 775 if (!clk->clksel) 776 return -EINVAL; 777 778 parent_div = _omap2_clksel_get_src_field(new_parent, clk, &field_val); 779 if (!parent_div) 780 return -EINVAL; 781 782 if (clk->usecount > 0) 783 _omap2_clk_disable(clk); 784 785 /* Set new source value (previous dividers if any in effect) */ 786 v = __raw_readl(clk->clksel_reg); 787 v &= ~clk->clksel_mask; 788 v |= field_val << __ffs(clk->clksel_mask); 789 __raw_writel(v, clk->clksel_reg); 790 v = __raw_readl(clk->clksel_reg); /* OCP barrier */ 791 792 _omap2xxx_clk_commit(clk); 793 794 if (clk->usecount > 0) 795 _omap2_clk_enable(clk); 796 797 clk_reparent(clk, new_parent); 798 799 /* CLKSEL clocks follow their parents' rates, divided by a divisor */ 800 clk->rate = new_parent->rate; 801 802 if (parent_div > 0) 803 clk->rate /= parent_div; 804 805 pr_debug("clock: set parent of %s to %s (new rate %ld)\n", 806 clk->name, clk->parent->name, clk->rate); 807 808 return 0; 809 } 810 811 /* DPLL rate rounding code */ 812 813 /** 814 * omap2_dpll_set_rate_tolerance: set the 
 * error tolerance during rate rounding
 * @clk: struct clk * of the DPLL
 * @tolerance: maximum rate error tolerance
 *
 * Set the maximum DPLL rate error tolerance for the rate rounding
 * algorithm.  The rate tolerance is an attempt to balance DPLL power
 * saving (the least divider value "n") vs. rate fidelity (the least
 * difference between the desired DPLL target rate and the rounded
 * rate out of the algorithm).  So, increasing the tolerance is likely
 * to decrease DPLL power consumption and increase DPLL rate error.
 * Returns -EINVAL if provided a null clock ptr or a clk that is not a
 * DPLL; or 0 upon success.
 */
int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance)
{
	if (!clk || !clk->dpll_data)
		return -EINVAL;

	clk->dpll_data->rate_tolerance = tolerance;

	return 0;
}

/* Compute parent_rate * m / n with a 64-bit intermediate to avoid overflow */
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long num;

	num = (unsigned long long)parent_rate * m;
	do_div(num, n);
	return num;
}

/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int r = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		(*m)--;
		*new_rate = 0;	/* force a recompute with the smaller m */
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		r = DPLL_MULT_UNDERFLOW;
	}

	/* m was decremented or clamped above: recompute the rate */
	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return r;
}

/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL, a desired target rate, and a rate tolerance, round
 * the target rate to a possible, programmable rate for this DPLL.
 * Rate tolerance is assumed to be set by the caller before this
 * function is called.  Attempts to select the minimum possible n
 * within the tolerance to reduce power consumption.  Stores the
 * computed (m, n) in the DPLL's dpll_data structure so set_rate()
 * will not need to call this (expensive) function again.
 * Returns ~0
 * if the target rate cannot be rounded, either because the rate is
 * too low or because the rate tolerance is set too tightly; or the
 * rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, e, scaled_max_m;
	unsigned long scaled_rt_rp, new_rate;
	int min_e = -1, min_e_m = -1, min_e_n = -1;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	pr_debug("clock: starting DPLL round_rate for clock %s, target rate "
		 "%ld\n", clk->name, target_rate);

	/* Prescale the target/parent ratio to mitigate roundoff error */
	scaled_rt_rp = target_rate / (clk->parent->rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {

		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, a m overflow means we
		 * can bail out completely (since as n increases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    clk->parent->rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		e = target_rate - new_rate;
		pr_debug("clock: n = %d: m = %d: rate error is %d "
			 "(new_rate = %ld)\n", n, m, e, new_rate);

		if (min_e == -1 ||
		    min_e >= (int)(abs(e) - dd->rate_tolerance)) {
			min_e = e;
			min_e_m = m;
			min_e_n = n;

			pr_debug("clock: found new least error %d\n", min_e);

			/* We found good settings -- bail out now */
			if (min_e <= dd->rate_tolerance)
				break;
		}
	}

	if (min_e < 0) {
		pr_debug("clock: error: target rate or tolerance too low\n");
		return ~0;
	}

	/* Cache the winning (m, n) so set_rate() can skip this search */
	dd->last_rounded_m = min_e_m;
	dd->last_rounded_n = min_e_n;
	dd->last_rounded_rate = _dpll_compute_new_rate(clk->parent->rate,
						       min_e_m, min_e_n);

	pr_debug("clock: final least error: e = %d, m = %d, n = %d\n",
		 min_e, min_e_m, min_e_n);
	pr_debug("clock: final rate: %ld (target rate: %ld)\n",
		 dd->last_rounded_rate, target_rate);

	return dd->last_rounded_rate;
}

/*-------------------------------------------------------------------------
 * Omap2 clock reset and init functions
 *-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * If @clk's enable bit shows the clock enabled in hardware, force it
 * disabled.  On 34xx this goes through the full enable/disable path so
 * the clockdomain usecounts stay balanced.
 */
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	/* With INVERT_ENABLE, a set bit means "disabled" */
	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else
		_omap2_clk_disable(clk);
}
#endif