/*
 * linux/arch/arm/mach-omap2/clock.c
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2008 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <asm/bitops.h>

#include <asm/io.h>

#include <mach/clock.h>
#include <mach/sram.h>
#include <mach/cpu.h>
#include <asm/div64.h>

#include "memory.h"
#include "sdrc.h"
#include "clock.h"
#include "prm.h"
#include "prm-regbits-24xx.h"
#include "cm.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"

#define MAX_CLOCK_ENABLE_WAIT		100000

/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		1
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		(1 << 0)

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate is to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))

u8 cpu_mask;

/*-------------------------------------------------------------------------
 * Omap2 specific clock functions
 *-------------------------------------------------------------------------*/

/**
 * omap2_init_clksel_parent - set a clksel clk's parent field from the hardware
 * @clk: OMAP clock struct ptr to use
 *
 * Given a pointer to a source-selectable struct clk, read the hardware
 * register and determine what its parent is currently set to.  Update the
 * clk->parent field with the appropriate clk ptr.
 */
void omap2_init_clksel_parent(struct clk *clk)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 r, found = 0;

	if (!clk->clksel)
		return;

	r = __raw_readl(clk->clksel_reg) & clk->clksel_mask;
	r >>= __ffs(clk->clksel_mask);

	for (clks = clk->clksel; clks->parent && !found; clks++) {
		for (clkr = clks->rates; clkr->div && !found; clkr++) {
			if ((clkr->flags & cpu_mask) && (clkr->val == r)) {
				if (clk->parent != clks->parent) {
					pr_debug("clock: inited %s parent "
						 "to %s (was %s)\n",
						 clk->name, clks->parent->name,
						 ((clk->parent) ?
						  clk->parent->name : "NULL"));
					clk->parent = clks->parent;
				}
				found = 1;
			}
		}
	}

	if (!found)
		printk(KERN_ERR "clock: init parent: could not find "
		       "regval %0x for clock %s\n", r, clk->name);

	return;
}

/* Returns the DPLL rate */
u32 omap2_get_dpll_rate(struct clk *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, dpll;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	/* REVISIT: What do we return on error? */
	if (!dd)
		return 0;

	dpll = __raw_readl(dd->mult_div1_reg);
	dpll_mult = dpll & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = dpll & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	dpll_clk = (long long)clk->parent->rate * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}
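
/*
 * Worked example for omap2_get_dpll_rate() above (illustrative numbers
 * only, not taken from any TRM): with a 19,200,000 Hz parent clock and
 * register fields giving dpll_mult = 360 and dpll_div = 11, the DPLL
 * rate is 19200000 * 360 / (11 + 1) = 576000000 Hz.
 */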

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
void omap2_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	clk->rate = clk->parent->rate / clk->fixed_div;

	if (clk->flags & RATE_PROPAGATES)
		propagate_rate(clk);
}

/**
 * omap2_wait_clock_ready - wait for clock to enable
 * @reg: physical address of clock IDLEST register
 * @mask: value to mask against to determine if the clock is active
 * @name: name of the clock (for printk)
 *
 * Returns 1 if the clock enabled in time, or 0 if it failed to enable
 * in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
 */
int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
{
	int i = 0;
	int ena = 0;

	/*
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes
	 */
	if (cpu_mask & (RATE_IN_242X | RATE_IN_243X)) {
		ena = mask;
	} else if (cpu_mask & RATE_IN_343X) {
		ena = 0;
	}

	/* Wait for lock */
	while (((__raw_readl(reg) & mask) != ena) &&
	       (i++ < MAX_CLOCK_ENABLE_WAIT)) {
		udelay(1);
	}

	if (i < MAX_CLOCK_ENABLE_WAIT)
		pr_debug("Clock %s stable after %d loops\n", name, i);
	else
		printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
		       name, MAX_CLOCK_ENABLE_WAIT);

	return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
}

/*
 * Note: We don't need special code here for INVERT_ENABLE
 * for the time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 */
static void omap2_clk_wait_ready(struct clk *clk)
{
	void __iomem *reg, *other_reg, *st_reg;
	u32 bit;

	/*
	 * REVISIT: This code is pretty ugly.  It would be nice to generalize
	 * it and pull it into struct clk itself somehow.
	 */
	reg = clk->enable_reg;
	if ((((u32)reg & 0xff) >= CM_FCLKEN1) &&
	    (((u32)reg & 0xff) <= OMAP24XX_CM_FCLKEN2))
		other_reg = (void __iomem *)(((u32)reg & ~0xf0) | 0x10); /* CM_ICLKEN* */
	else if ((((u32)reg & 0xff) >= CM_ICLKEN1) &&
		 (((u32)reg & 0xff) <= OMAP24XX_CM_ICLKEN4))
		other_reg = (void __iomem *)(((u32)reg & ~0xf0) | 0x00); /* CM_FCLKEN* */
	else
		return;

	/* REVISIT: What are the appropriate exclusions for 34XX? */
	/* No check for DSS or cam clocks */
	if (cpu_is_omap24xx() && ((u32)reg & 0x0f) == 0) { /* CM_{F,I}CLKEN1 */
		if (clk->enable_bit == OMAP24XX_EN_DSS2_SHIFT ||
		    clk->enable_bit == OMAP24XX_EN_DSS1_SHIFT ||
		    clk->enable_bit == OMAP24XX_EN_CAM_SHIFT)
			return;
	}

	/* REVISIT: What are the appropriate exclusions for 34XX? */
	/* OMAP3: ignore DSS-mod clocks */
	if (cpu_is_omap34xx() &&
	    (((u32)reg & ~0xff) == (u32)OMAP_CM_REGADDR(OMAP3430_DSS_MOD, 0) ||
	     ((((u32)reg & ~0xff) == (u32)OMAP_CM_REGADDR(CORE_MOD, 0)) &&
	      clk->enable_bit == OMAP3430_EN_SSI_SHIFT)))
		return;

	/* Check if both functional and interface clocks
	 * are running. */
	bit = 1 << clk->enable_bit;
	if (!(__raw_readl(other_reg) & bit))
		return;
	st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */

	omap2_wait_clock_ready(st_reg, bit, clk->name);
}
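
/*
 * Note on the address arithmetic in omap2_clk_wait_ready() above: it
 * relies on the CM register layout, in which a module's CM_FCLKENx,
 * CM_ICLKENx and CM_IDLESTx register groups live at offsets 0x0x, 0x1x
 * and 0x2x respectively, so clearing bits [7:4] and OR-ing in 0x00,
 * 0x10 or 0x20 moves between the companion registers of one module.
 */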

/* Enables clock without considering parent dependencies or use count
 * REVISIT: Maybe change this to use clk->enable like on omap1?
 */
int _omap2_clk_enable(struct clk *clk)
{
	u32 regval32;

	if (clk->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK))
		return 0;

	if (clk->enable)
		return clk->enable(clk);

	if (unlikely(clk->enable_reg == 0)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	regval32 = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		regval32 &= ~(1 << clk->enable_bit);
	else
		regval32 |= (1 << clk->enable_bit);
	__raw_writel(regval32, clk->enable_reg);
	wmb();

	omap2_clk_wait_ready(clk);

	return 0;
}

/* Disables clock without considering parent dependencies or use count */
void _omap2_clk_disable(struct clk *clk)
{
	u32 regval32;

	if (clk->flags & (ALWAYS_ENABLED | PARENT_CONTROLS_CLOCK))
		return;

	if (clk->disable) {
		clk->disable(clk);
		return;
	}

	if (clk->enable_reg == 0) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		printk(KERN_ERR "clock: clk_disable called on independent "
		       "clock %s which has no enable_reg\n", clk->name);
		return;
	}

	regval32 = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		regval32 |= (1 << clk->enable_bit);
	else
		regval32 &= ~(1 << clk->enable_bit);
	__raw_writel(regval32, clk->enable_reg);
	wmb();
}

void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		_omap2_clk_disable(clk);
		if (likely((u32)clk->parent))
			omap2_clk_disable(clk->parent);
	}
}

int omap2_clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (likely((u32)clk->parent))
			ret = omap2_clk_enable(clk->parent);

		if (unlikely(ret != 0)) {
			clk->usecount--;
			return ret;
		}

		ret = _omap2_clk_enable(clk);

		if (unlikely(ret != 0) && clk->parent) {
			omap2_clk_disable(clk->parent);
			clk->usecount--;
		}
	}

	return ret;
}

/*
 * Used for clocks that are part of CLKSEL_xyz governed clocks.
 * REVISIT: Maybe change to use clk->enable() functions like on omap1?
 */
void omap2_clksel_recalc(struct clk *clk)
{
	u32 div = 0;

	pr_debug("clock: recalc'ing clksel clk %s\n", clk->name);

	div = omap2_clksel_get_divisor(clk);
	if (div == 0)
		return;

	if (unlikely(clk->rate == clk->parent->rate / div))
		return;
	clk->rate = clk->parent->rate / div;

	pr_debug("clock: new clock rate is %ld (div %d)\n", clk->rate, div);

	if (unlikely(clk->flags & RATE_PROPAGATES))
		propagate_rate(clk);
}
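
/*
 * The clksel helpers below all walk the clk->clksel table: one struct
 * clksel entry per candidate parent clock, each carrying an array of
 * struct clksel_rate entries that pair an integer divider (div) with
 * the register field value (val) selecting it, plus flags indicating
 * which OMAP variants (and the default rate) the entry applies to.
 */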

/**
 * omap2_get_clksel_by_parent - return clksel struct for a given clk & parent
 * @clk: OMAP struct clk ptr to inspect
 * @src_clk: OMAP struct clk ptr of the parent clk to search for
 *
 * Scan the struct clksel array associated with the clock to find
 * the element associated with the supplied parent clock address.
 * Returns a pointer to the struct clksel on success or NULL on error.
 */
const struct clksel *omap2_get_clksel_by_parent(struct clk *clk,
						struct clk *src_clk)
{
	const struct clksel *clks;

	if (!clk->clksel)
		return NULL;

	for (clks = clk->clksel; clks->parent; clks++) {
		if (clks->parent == src_clk)
			break; /* Found the requested parent */
	}

	if (!clks->parent) {
		printk(KERN_ERR "clock: Could not find parent clock %s in "
		       "clksel array of clock %s\n", src_clk->name,
		       clk->name);
		return NULL;
	}

	return clks;
}

/**
 * omap2_clksel_round_rate_div - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
 * they are only settable as part of virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;

	printk(KERN_INFO "clock: clksel_round_rate_div: %s target_rate %ld\n",
	       clk->name, target_rate);

	*new_div = 1;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (clks == NULL)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			printk(KERN_ERR "clock: clksel_rate table not sorted "
			       "for clock %s\n", clk->name);

		last_div = clkr->div;

		test_rate = clk->parent->rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find divisor for target "
		       "rate %ld for clock %s parent %s\n", target_rate,
		       clk->name, clk->parent->name);
		return ~0;
	}

	*new_div = clkr->div;

	printk(KERN_INFO "clock: new_div = %d, new_rate = %ld\n", *new_div,
	       (clk->parent->rate / clkr->div));

	return (clk->parent->rate / clkr->div);
}
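
/*
 * Illustration of omap2_clksel_round_rate_div() above (hypothetical
 * values): with a 96 MHz parent and a divider table of {1, 2, 4}, a
 * 24000000 Hz target selects div 4 and returns exactly 24000000; a
 * 30000000 Hz target also selects div 4, since 96 MHz / 4 = 24 MHz is
 * the first rate in the table that does not exceed the target.
 */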

/**
 * omap2_clksel_round_rate - find rounded rate for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 *
 * Compatibility wrapper for the OMAP clock framework.
 * Finds the best target rate based on the source clock and possible divider
 * rates.  The divider array must be sorted with smallest divider first.
 * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
 * they are only settable as part of virtual_prcm set.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 new_div;

	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}

/* Given a clock and a rate apply a clock specific rounding function */
long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate != 0)
		return clk->round_rate(clk, rate);

	if (clk->flags & RATE_FIXED)
		printk(KERN_ERR "clock: generic omap2_clk_round_rate called "
		       "on fixed-rate clock %s\n", clk->name);

	return clk->rate;
}

/**
 * omap2_clksel_to_divisor() - turn clksel field value into integer divider
 * @clk: OMAP struct clk to use
 * @field_val: register field value to find
 *
 * Given a struct clk of a rate-selectable clksel clock, and a register field
 * value to search for, find the corresponding clock divisor.  The register
 * field value should be pre-masked and shifted down so the LSB is at bit 0
 * before calling.  Returns 0 on error.
 */
u32 omap2_clksel_to_divisor(struct clk *clk, u32 field_val)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (clks == NULL)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if ((clkr->flags & cpu_mask) && (clkr->val == field_val))
			break;
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find fieldval %d for "
		       "clock %s parent %s\n", field_val, clk->name,
		       clk->parent->name);
		return 0;
	}

	return clkr->div;
}

/**
 * omap2_divisor_to_clksel() - turn clksel integer divisor into a field value
 * @clk: OMAP struct clk to use
 * @div: integer divisor to search for
 *
 * Given a struct clk of a rate-selectable clksel clock, and a clock divisor,
 * find the corresponding register field value.  The return register value is
 * the value before left-shifting.  Returns 0xffffffff on error.
 */
u32 omap2_divisor_to_clksel(struct clk *clk, u32 div)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	/* should never happen */
	WARN_ON(div == 0);

	clks = omap2_get_clksel_by_parent(clk, clk->parent);
	if (clks == NULL)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if ((clkr->flags & cpu_mask) && (clkr->div == div))
			break;
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find divisor %d for "
		       "clock %s parent %s\n", div, clk->name,
		       clk->parent->name);
		return ~0;
	}

	return clkr->val;
}
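
/*
 * omap2_clksel_to_divisor() and omap2_divisor_to_clksel() above are
 * inverse lookups over the same clksel_rate table: given a hypothetical
 * entry { .div = 2, .val = 0x2, ... }, a register field value of 0x2
 * maps to a divide-by-2, and vice versa.
 */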

/**
 * omap2_get_clksel - find clksel register addr & field mask for a clk
 * @clk: struct clk to use
 * @field_mask: ptr to u32 to store the register field mask
 *
 * Returns the address of the clksel register upon success or NULL on error.
 */
void __iomem *omap2_get_clksel(struct clk *clk, u32 *field_mask)
{
	if (unlikely((clk->clksel_reg == 0) || (clk->clksel_mask == 0)))
		return NULL;

	*field_mask = clk->clksel_mask;

	return clk->clksel_reg;
}

/**
 * omap2_clksel_get_divisor - get current divider applied to parent clock.
 * @clk: OMAP struct clk to use.
 *
 * Returns the integer divisor upon success or 0 on error.
 */
u32 omap2_clksel_get_divisor(struct clk *clk)
{
	u32 field_mask, field_val;
	void __iomem *div_addr;

	div_addr = omap2_get_clksel(clk, &field_mask);
	if (div_addr == 0)
		return 0;

	field_val = __raw_readl(div_addr) & field_mask;
	field_val >>= __ffs(field_mask);

	return omap2_clksel_to_divisor(clk, field_val);
}

int omap2_clksel_set_rate(struct clk *clk, unsigned long rate)
{
	u32 field_mask, field_val, reg_val, validrate, new_div = 0;
	void __iomem *div_addr;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	div_addr = omap2_get_clksel(clk, &field_mask);
	if (div_addr == 0)
		return -EINVAL;

	field_val = omap2_divisor_to_clksel(clk, new_div);
	if (field_val == ~0)
		return -EINVAL;

	reg_val = __raw_readl(div_addr);
	reg_val &= ~field_mask;
	reg_val |= (field_val << __ffs(field_mask));
	__raw_writel(reg_val, div_addr);
	wmb();

	clk->rate = clk->parent->rate / new_div;

	if (clk->flags & DELAYED_APP && cpu_is_omap24xx()) {
		prm_write_mod_reg(OMAP24XX_VALID_CONFIG,
			OMAP24XX_GR_MOD, OMAP24XX_PRCM_CLKCFG_CTRL_OFFSET);
		wmb();
	}

	return 0;
}

/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);

	/* CONFIG_PARTICIPANT clocks are changed only in sets via the
	   rate table mechanism, driven by mpu_speed */
	if (clk->flags & CONFIG_PARTICIPANT)
		return -EINVAL;

	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
	if (clk->set_rate != 0)
		ret = clk->set_rate(clk, rate);

	if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
		propagate_rate(clk);

	return ret;
}

/*
 * Converts encoded control register address into a full address
 * On error, *src_addr will be returned as 0.
 */
static u32 omap2_clksel_get_src_field(void __iomem **src_addr,
				      struct clk *src_clk, u32 *field_mask,
				      struct clk *clk, u32 *parent_div)
{
	const struct clksel *clks;
	const struct clksel_rate *clkr;

	*parent_div = 0;
	*src_addr = 0;

	clks = omap2_get_clksel_by_parent(clk, src_clk);
	if (clks == NULL)
		return 0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (clkr->flags & (cpu_mask | DEFAULT_RATE))
			break; /* Found the default rate for this platform */
	}

	if (!clkr->div) {
		printk(KERN_ERR "clock: Could not find default rate for "
		       "clock %s parent %s\n", clk->name,
		       src_clk->name);
		return 0;
	}

	/* Should never happen.  Add a clksel mask to the struct clk. */
	WARN_ON(clk->clksel_mask == 0);

	*field_mask = clk->clksel_mask;
	*src_addr = clk->clksel_reg;
	*parent_div = clkr->div;

	return clkr->val;
}
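
/*
 * omap2_clk_set_parent() below reprograms only the parent-select field;
 * the divider implied by the chosen clksel_rate entry (parent_div, as
 * returned by omap2_clksel_get_src_field() above) stays in effect, so
 * the resulting rate is new_parent->rate / parent_div.
 */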

int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	void __iomem *src_addr;
	u32 field_val, field_mask, reg_val, parent_div;

	if (unlikely(clk->flags & CONFIG_PARTICIPANT))
		return -EINVAL;

	if (!clk->clksel)
		return -EINVAL;

	field_val = omap2_clksel_get_src_field(&src_addr, new_parent,
					       &field_mask, clk, &parent_div);
	if (src_addr == 0)
		return -EINVAL;

	if (clk->usecount > 0)
		_omap2_clk_disable(clk);

	/* Set new source value (previous dividers if any in effect) */
	reg_val = __raw_readl(src_addr) & ~field_mask;
	reg_val |= (field_val << __ffs(field_mask));
	__raw_writel(reg_val, src_addr);
	wmb();

	if (clk->flags & DELAYED_APP && cpu_is_omap24xx()) {
		__raw_writel(OMAP24XX_VALID_CONFIG, OMAP24XX_PRCM_CLKCFG_CTRL);
		wmb();
	}

	if (clk->usecount > 0)
		_omap2_clk_enable(clk);

	clk->parent = new_parent;

	/* CLKSEL clocks follow their parents' rates, divided by a divisor */
	clk->rate = new_parent->rate;

	if (parent_div > 0)
		clk->rate /= parent_div;

	pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
		 clk->name, clk->parent->name, clk->rate);

	if (unlikely(clk->flags & RATE_PROPAGATES))
		propagate_rate(clk);

	return 0;
}

/* DPLL rate rounding code */

/**
 * omap2_dpll_set_rate_tolerance: set the error tolerance during rate rounding
 * @clk: struct clk * of the DPLL
 * @tolerance: maximum rate error tolerance
 *
 * Set the maximum DPLL rate error tolerance for the rate rounding
 * algorithm.  The rate tolerance is an attempt to balance DPLL power
 * saving (the least divider value "n") vs. rate fidelity (the least
 * difference between the desired DPLL target rate and the rounded
 * rate out of the algorithm).  So, increasing the tolerance is likely
 * to decrease DPLL power consumption and increase DPLL rate error.
 * Returns -EINVAL if provided a null clock ptr or a clk that is not a
 * DPLL; or 0 upon success.
 */
int omap2_dpll_set_rate_tolerance(struct clk *clk, unsigned int tolerance)
{
	if (!clk || !clk->dpll_data)
		return -EINVAL;

	clk->dpll_data->rate_tolerance = tolerance;

	return 0;
}

static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long num;

	num = (unsigned long long)parent_rate * m;
	do_div(num, n);
	return num;
}

/*
 * _dpll_test_mult - test a DPLL multiplier value
 * @m: pointer to the DPLL m (multiplier) value under test
 * @n: current DPLL n (divider) value under test
 * @new_rate: pointer to storage for the resulting rounded rate
 * @target_rate: the desired DPLL rate
 * @parent_rate: the DPLL's parent clock rate
 *
 * This code tests a DPLL multiplier value, ensuring that the
 * resulting rate will not be higher than the target_rate, and that
 * the multiplier value itself is valid for the DPLL.  Initially, the
 * integer pointed to by the m argument should be prescaled by
 * multiplying by DPLL_SCALE_FACTOR.  The code will replace this with
 * a non-scaled m upon return.  This non-scaled m will result in a
 * new_rate as close as possible to target_rate (but not greater than
 * target_rate) given the current (parent_rate, n, prescaled m)
 * triple.  Returns DPLL_MULT_UNDERFLOW in the event that the
 * non-scaled m attempted to underflow, which can allow the calling
 * function to bail out early; or 0 upon success.
 */
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int flags = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		(*m)--;
		*new_rate = 0;
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		flags = DPLL_MULT_UNDERFLOW;
	}

	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return flags;
}
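
/*
 * Worked example of the unscaling in _dpll_test_mult() above: with
 * DPLL_SCALE_FACTOR = 64, DPLL_ROUNDING_VAL = (2 / 2) * (64 / 2) = 32,
 * i.e. half the scale factor, so *m is rounded half-up.  A scaled m of
 * 1234 gives 1234 / 64 = 19 remainder 18 (< 32), hence m = 19; a scaled
 * m of 1250 gives remainder 34 (>= 32), hence m = 20.
 */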

/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL, a desired target rate, and a rate tolerance, round
 * the target rate to a possible, programmable rate for this DPLL.
 * Rate tolerance is assumed to be set by the caller before this
 * function is called.  Attempts to select the minimum possible n
 * within the tolerance to reduce power consumption.  Stores the
 * computed (m, n) in the DPLL's dpll_data structure so set_rate()
 * will not need to call this (expensive) function again.  Returns ~0
 * if the target rate cannot be rounded, either because the rate is
 * too low or because the rate tolerance is set too tightly; or the
 * rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, e, scaled_max_m;
	unsigned long scaled_rt_rp, new_rate;
	int min_e = -1, min_e_m = -1, min_e_n = -1;

	if (!clk || !clk->dpll_data)
		return ~0;

	pr_debug("clock: starting DPLL round_rate for clock %s, target rate "
		 "%ld\n", clk->name, target_rate);

	scaled_rt_rp = target_rate / (clk->parent->rate / DPLL_SCALE_FACTOR);
	scaled_max_m = clk->dpll_data->max_multiplier * DPLL_SCALE_FACTOR;

	clk->dpll_data->last_rounded_rate = 0;

	for (n = clk->dpll_data->max_divider; n >= DPLL_MIN_DIVIDER; n--) {

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n down, an m overflow means we
		 * can immediately skip to the next n
		 */
		if (m > scaled_max_m)
			continue;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    clk->parent->rate);

		e = target_rate - new_rate;
		pr_debug("clock: n = %d: m = %d: rate error is %d "
			 "(new_rate = %ld)\n", n, m, e, new_rate);

		if (min_e == -1 ||
		    min_e >= (int)(abs(e) - clk->dpll_data->rate_tolerance)) {
			min_e = e;
			min_e_m = m;
			min_e_n = n;

			pr_debug("clock: found new least error %d\n", min_e);
		}

		/*
		 * Since we're counting n down, an m underflow means we
		 * can bail out completely (since as n decreases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (r & DPLL_MULT_UNDERFLOW)
			break;
	}

	if (min_e < 0) {
		pr_debug("clock: error: target rate or tolerance too low\n");
		return ~0;
	}

	clk->dpll_data->last_rounded_m = min_e_m;
	clk->dpll_data->last_rounded_n = min_e_n;
	clk->dpll_data->last_rounded_rate =
		_dpll_compute_new_rate(clk->parent->rate, min_e_m, min_e_n);

	pr_debug("clock: final least error: e = %d, m = %d, n = %d\n",
		 min_e, min_e_m, min_e_n);
	pr_debug("clock: final rate: %ld (target rate: %ld)\n",
		 clk->dpll_data->last_rounded_rate, target_rate);

	return clk->dpll_data->last_rounded_rate;
}
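
/*
 * Worked example of one omap2_dpll_round_rate() iteration (illustrative
 * numbers only): with a 19,200,000 Hz parent and a 576,000,000 Hz
 * target, scaled_rt_rp = 576000000 / (19200000 / 64) = 1920.  At the
 * n = 12 step (here n is the actual divider), m = 1920 * 12 = 23040
 * scaled, which _dpll_test_mult() unscales to 360, giving new_rate =
 * 19200000 * 360 / 12 = 576000000 and a rate error e of 0.
 */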

/*-------------------------------------------------------------------------
 * Omap2 clock reset and init functions
 *-------------------------------------------------------------------------*/

#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	printk(KERN_INFO "Disabling unused clock \"%s\"\n", clk->name);
	_omap2_clk_disable(clk);
}
#endif