1 /* 2 * Copyright (C) 2013 Broadcom Corporation 3 * Copyright 2013 Linaro Limited 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation version 2. 8 * 9 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 * kind, whether express or implied; without even the implied warranty 11 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 */ 14 15 #include "clk-kona.h" 16 17 #include <linux/delay.h> 18 19 /* 20 * "Policies" affect the frequencies of bus clocks provided by a 21 * CCU. (I believe these polices are named "Deep Sleep", "Economy", 22 * "Normal", and "Turbo".) A lower policy number has lower power 23 * consumption, and policy 2 is the default. 24 */ 25 #define CCU_POLICY_COUNT 4 26 27 #define CCU_ACCESS_PASSWORD 0xA5A500 28 #define CLK_GATE_DELAY_LOOP 2000 29 30 /* Bitfield operations */ 31 32 /* Produces a mask of set bits covering a range of a 32-bit value */ 33 static inline u32 bitfield_mask(u32 shift, u32 width) 34 { 35 return ((1 << width) - 1) << shift; 36 } 37 38 /* Extract the value of a bitfield found within a given register value */ 39 static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width) 40 { 41 return (reg_val & bitfield_mask(shift, width)) >> shift; 42 } 43 44 /* Replace the value of a bitfield found within a given register value */ 45 static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val) 46 { 47 u32 mask = bitfield_mask(shift, width); 48 49 return (reg_val & ~mask) | (val << shift); 50 } 51 52 /* Divider and scaling helpers */ 53 54 /* 55 * Implement DIV_ROUND_CLOSEST() for 64-bit dividend and both values 56 * unsigned. Note that unlike do_div(), the remainder is discarded 57 * and the return value is the quotient (not the remainder). 58 */ 59 u64 do_div_round_closest(u64 dividend, unsigned long divisor) 60 { 61 u64 result; 62 63 result = dividend + ((u64)divisor >> 1); 64 (void)do_div(result, divisor); 65 66 return result; 67 } 68 69 /* Convert a divider into the scaled divisor value it represents. */ 70 static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) 71 { 72 return (u64)reg_div + ((u64)1 << div->u.s.frac_width); 73 } 74 75 /* 76 * Build a scaled divider value as close as possible to the 77 * given whole part (div_value) and fractional part (expressed 78 * in billionths). 79 */ 80 u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) 81 { 82 u64 combined; 83 84 BUG_ON(!div_value); 85 BUG_ON(billionths >= BILLION); 86 87 combined = (u64)div_value * BILLION + billionths; 88 combined <<= div->u.s.frac_width; 89 90 return do_div_round_closest(combined, BILLION); 91 } 92 93 /* The scaled minimum divisor representable by a divider */ 94 static inline u64 95 scaled_div_min(struct bcm_clk_div *div) 96 { 97 if (divider_is_fixed(div)) 98 return (u64)div->u.fixed; 99 100 return scaled_div_value(div, 0); 101 } 102 103 /* The scaled maximum divisor representable by a divider */ 104 u64 scaled_div_max(struct bcm_clk_div *div) 105 { 106 u32 reg_div; 107 108 if (divider_is_fixed(div)) 109 return (u64)div->u.fixed; 110 111 reg_div = ((u32)1 << div->u.s.width) - 1; 112 113 return scaled_div_value(div, reg_div); 114 } 115 116 /* 117 * Convert a scaled divisor into its divider representation as 118 * stored in a divider register field. 119 */ 120 static inline u32 121 divider(struct bcm_clk_div *div, u64 scaled_div) 122 { 123 BUG_ON(scaled_div < scaled_div_min(div)); 124 BUG_ON(scaled_div > scaled_div_max(div)); 125 126 return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); 127 } 128 129 /* Return a rate scaled for use when dividing by a scaled divisor. */ 130 static inline u64 131 scale_rate(struct bcm_clk_div *div, u32 rate) 132 { 133 if (divider_is_fixed(div)) 134 return (u64)rate; 135 136 return (u64)rate << div->u.s.frac_width; 137 } 138 139 /* CCU access */ 140 141 /* Read a 32-bit register value from a CCU's address space. */ 142 static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset) 143 { 144 return readl(ccu->base + reg_offset); 145 } 146 147 /* Write a 32-bit register value into a CCU's address space. */ 148 static inline void 149 __ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val) 150 { 151 writel(reg_val, ccu->base + reg_offset); 152 } 153 154 static inline unsigned long ccu_lock(struct ccu_data *ccu) 155 { 156 unsigned long flags; 157 158 spin_lock_irqsave(&ccu->lock, flags); 159 160 return flags; 161 } 162 static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags) 163 { 164 spin_unlock_irqrestore(&ccu->lock, flags); 165 } 166 167 /* 168 * Enable/disable write access to CCU protected registers. The 169 * WR_ACCESS register for all CCUs is at offset 0. 170 */ 171 static inline void __ccu_write_enable(struct ccu_data *ccu) 172 { 173 if (ccu->write_enabled) { 174 pr_err("%s: access already enabled for %s\n", __func__, 175 ccu->name); 176 return; 177 } 178 ccu->write_enabled = true; 179 __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1); 180 } 181 182 static inline void __ccu_write_disable(struct ccu_data *ccu) 183 { 184 if (!ccu->write_enabled) { 185 pr_err("%s: access wasn't enabled for %s\n", __func__, 186 ccu->name); 187 return; 188 } 189 190 __ccu_write(ccu, 0, CCU_ACCESS_PASSWORD); 191 ccu->write_enabled = false; 192 } 193 194 /* 195 * Poll a register in a CCU's address space, returning when the 196 * specified bit in that register's value is set (or clear). Delay 197 * a microsecond after each read of the register. Returns true if 198 * successful, or false if we gave up trying. 199 * 200 * Caller must ensure the CCU lock is held. 201 */ 202 static inline bool 203 __ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want) 204 { 205 unsigned int tries; 206 u32 bit_mask = 1 << bit; 207 208 for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) { 209 u32 val; 210 bool bit_val; 211 212 val = __ccu_read(ccu, reg_offset); 213 bit_val = (val & bit_mask) != 0; 214 if (bit_val == want) 215 return true; 216 udelay(1); 217 } 218 pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__, 219 ccu->name, reg_offset, bit, want ? "set" : "clear"); 220 221 return false; 222 } 223 224 /* Policy operations */ 225 226 static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync) 227 { 228 struct bcm_policy_ctl *control = &ccu->policy.control; 229 u32 offset; 230 u32 go_bit; 231 u32 mask; 232 bool ret; 233 234 /* If we don't need to control policy for this CCU, we're done. */ 235 if (!policy_ctl_exists(control)) 236 return true; 237 238 offset = control->offset; 239 go_bit = control->go_bit; 240 241 /* Ensure we're not busy before we start */ 242 ret = __ccu_wait_bit(ccu, offset, go_bit, false); 243 if (!ret) { 244 pr_err("%s: ccu %s policy engine wouldn't go idle\n", 245 __func__, ccu->name); 246 return false; 247 } 248 249 /* 250 * If it's a synchronous request, we'll wait for the voltage 251 * and frequency of the active load to stabilize before 252 * returning. To do this we select the active load by 253 * setting the ATL bit. 254 * 255 * An asynchronous request instead ramps the voltage in the 256 * background, and when that process stabilizes, the target 257 * load is copied to the active load and the CCU frequency 258 * is switched. We do this by selecting the target load 259 * (ATL bit clear) and setting the request auto-copy (AC bit 260 * set). 261 * 262 * Note, we do NOT read-modify-write this register. 263 */ 264 mask = (u32)1 << go_bit; 265 if (sync) 266 mask |= 1 << control->atl_bit; 267 else 268 mask |= 1 << control->ac_bit; 269 __ccu_write(ccu, offset, mask); 270 271 /* Wait for indication that operation is complete. */ 272 ret = __ccu_wait_bit(ccu, offset, go_bit, false); 273 if (!ret) 274 pr_err("%s: ccu %s policy engine never started\n", 275 __func__, ccu->name); 276 277 return ret; 278 } 279 280 static bool __ccu_policy_engine_stop(struct ccu_data *ccu) 281 { 282 struct bcm_lvm_en *enable = &ccu->policy.enable; 283 u32 offset; 284 u32 enable_bit; 285 bool ret; 286 287 /* If we don't need to control policy for this CCU, we're done. */ 288 if (!policy_lvm_en_exists(enable)) 289 return true; 290 291 /* Ensure we're not busy before we start */ 292 offset = enable->offset; 293 enable_bit = enable->bit; 294 ret = __ccu_wait_bit(ccu, offset, enable_bit, false); 295 if (!ret) { 296 pr_err("%s: ccu %s policy engine already stopped\n", 297 __func__, ccu->name); 298 return false; 299 } 300 301 /* Now set the bit to stop the engine (NO read-modify-write) */ 302 __ccu_write(ccu, offset, (u32)1 << enable_bit); 303 304 /* Wait for indication that it has stopped. */ 305 ret = __ccu_wait_bit(ccu, offset, enable_bit, false); 306 if (!ret) 307 pr_err("%s: ccu %s policy engine never stopped\n", 308 __func__, ccu->name); 309 310 return ret; 311 } 312 313 /* 314 * A CCU has four operating conditions ("policies"), and some clocks 315 * can be disabled or enabled based on which policy is currently in 316 * effect. Such clocks have a bit in a "policy mask" register for 317 * each policy indicating whether the clock is enabled for that 318 * policy or not. The bit position for a clock is the same for all 319 * four registers, and the 32-bit registers are at consecutive 320 * addresses. 321 */ 322 static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy) 323 { 324 u32 offset; 325 u32 mask; 326 int i; 327 bool ret; 328 329 if (!policy_exists(policy)) 330 return true; 331 332 /* 333 * We need to stop the CCU policy engine to allow update 334 * of our policy bits. 335 */ 336 if (!__ccu_policy_engine_stop(ccu)) { 337 pr_err("%s: unable to stop CCU %s policy engine\n", 338 __func__, ccu->name); 339 return false; 340 } 341 342 /* 343 * For now, if a clock defines its policy bit we just mark 344 * it "enabled" for all four policies. 345 */ 346 offset = policy->offset; 347 mask = (u32)1 << policy->bit; 348 for (i = 0; i < CCU_POLICY_COUNT; i++) { 349 u32 reg_val; 350 351 reg_val = __ccu_read(ccu, offset); 352 reg_val |= mask; 353 __ccu_write(ccu, offset, reg_val); 354 offset += sizeof(u32); 355 } 356 357 /* We're done updating; fire up the policy engine again. */ 358 ret = __ccu_policy_engine_start(ccu, true); 359 if (!ret) 360 pr_err("%s: unable to restart CCU %s policy engine\n", 361 __func__, ccu->name); 362 363 return ret; 364 } 365 366 /* Gate operations */ 367 368 /* Determine whether a clock is gated. CCU lock must be held. */ 369 static bool 370 __is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) 371 { 372 u32 bit_mask; 373 u32 reg_val; 374 375 /* If there is no gate we can assume it's enabled. */ 376 if (!gate_exists(gate)) 377 return true; 378 379 bit_mask = 1 << gate->status_bit; 380 reg_val = __ccu_read(ccu, gate->offset); 381 382 return (reg_val & bit_mask) != 0; 383 } 384 385 /* Determine whether a clock is gated. */ 386 static bool 387 is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate) 388 { 389 long flags; 390 bool ret; 391 392 /* Avoid taking the lock if we can */ 393 if (!gate_exists(gate)) 394 return true; 395 396 flags = ccu_lock(ccu); 397 ret = __is_clk_gate_enabled(ccu, gate); 398 ccu_unlock(ccu, flags); 399 400 return ret; 401 } 402 403 /* 404 * Commit our desired gate state to the hardware. 405 * Returns true if successful, false otherwise. 406 */ 407 static bool 408 __gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate) 409 { 410 u32 reg_val; 411 u32 mask; 412 bool enabled = false; 413 414 BUG_ON(!gate_exists(gate)); 415 if (!gate_is_sw_controllable(gate)) 416 return true; /* Nothing we can change */ 417 418 reg_val = __ccu_read(ccu, gate->offset); 419 420 /* For a hardware/software gate, set which is in control */ 421 if (gate_is_hw_controllable(gate)) { 422 mask = (u32)1 << gate->hw_sw_sel_bit; 423 if (gate_is_sw_managed(gate)) 424 reg_val |= mask; 425 else 426 reg_val &= ~mask; 427 } 428 429 /* 430 * If software is in control, enable or disable the gate. 431 * If hardware is, clear the enabled bit for good measure. 432 * If a software controlled gate can't be disabled, we're 433 * required to write a 0 into the enable bit (but the gate 434 * will be enabled). 435 */ 436 mask = (u32)1 << gate->en_bit; 437 if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) && 438 !gate_is_no_disable(gate)) 439 reg_val |= mask; 440 else 441 reg_val &= ~mask; 442 443 __ccu_write(ccu, gate->offset, reg_val); 444 445 /* For a hardware controlled gate, we're done */ 446 if (!gate_is_sw_managed(gate)) 447 return true; 448 449 /* Otherwise wait for the gate to be in desired state */ 450 return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled); 451 } 452 453 /* 454 * Initialize a gate. Our desired state (hardware/software select, 455 * and if software, its enable state) is committed to hardware 456 * without the usual checks to see if it's already set up that way. 457 * Returns true if successful, false otherwise. 458 */ 459 static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate) 460 { 461 if (!gate_exists(gate)) 462 return true; 463 return __gate_commit(ccu, gate); 464 } 465 466 /* 467 * Set a gate to enabled or disabled state. Does nothing if the 468 * gate is not currently under software control, or if it is already 469 * in the requested state. Returns true if successful, false 470 * otherwise. CCU lock must be held. 471 */ 472 static bool 473 __clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable) 474 { 475 bool ret; 476 477 if (!gate_exists(gate) || !gate_is_sw_managed(gate)) 478 return true; /* Nothing to do */ 479 480 if (!enable && gate_is_no_disable(gate)) { 481 pr_warn("%s: invalid gate disable request (ignoring)\n", 482 __func__); 483 return true; 484 } 485 486 if (enable == gate_is_enabled(gate)) 487 return true; /* No change */ 488 489 gate_flip_enabled(gate); 490 ret = __gate_commit(ccu, gate); 491 if (!ret) 492 gate_flip_enabled(gate); /* Revert the change */ 493 494 return ret; 495 } 496 497 /* Enable or disable a gate. Returns 0 if successful, -EIO otherwise */ 498 static int clk_gate(struct ccu_data *ccu, const char *name, 499 struct bcm_clk_gate *gate, bool enable) 500 { 501 unsigned long flags; 502 bool success; 503 504 /* 505 * Avoid taking the lock if we can. We quietly ignore 506 * requests to change state that don't make sense. 507 */ 508 if (!gate_exists(gate) || !gate_is_sw_managed(gate)) 509 return 0; 510 if (!enable && gate_is_no_disable(gate)) 511 return 0; 512 513 flags = ccu_lock(ccu); 514 __ccu_write_enable(ccu); 515 516 success = __clk_gate(ccu, gate, enable); 517 518 __ccu_write_disable(ccu); 519 ccu_unlock(ccu, flags); 520 521 if (success) 522 return 0; 523 524 pr_err("%s: failed to %s gate for %s\n", __func__, 525 enable ? "enable" : "disable", name); 526 527 return -EIO; 528 } 529 530 /* Hysteresis operations */ 531 532 /* 533 * If a clock gate requires a turn-off delay it will have 534 * "hysteresis" register bits defined. The first, if set, enables 535 * the delay; and if enabled, the second bit determines whether the 536 * delay is "low" or "high" (1 means high). For now, if it's 537 * defined for a clock, we set it. 538 */ 539 static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst) 540 { 541 u32 offset; 542 u32 reg_val; 543 u32 mask; 544 545 if (!hyst_exists(hyst)) 546 return true; 547 548 offset = hyst->offset; 549 mask = (u32)1 << hyst->en_bit; 550 mask |= (u32)1 << hyst->val_bit; 551 552 reg_val = __ccu_read(ccu, offset); 553 reg_val |= mask; 554 __ccu_write(ccu, offset, reg_val); 555 556 return true; 557 } 558 559 /* Trigger operations */ 560 561 /* 562 * Caller must ensure CCU lock is held and access is enabled. 563 * Returns true if successful, false otherwise. 564 */ 565 static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig) 566 { 567 /* Trigger the clock and wait for it to finish */ 568 __ccu_write(ccu, trig->offset, 1 << trig->bit); 569 570 return __ccu_wait_bit(ccu, trig->offset, trig->bit, false); 571 } 572 573 /* Divider operations */ 574 575 /* Read a divider value and return the scaled divisor it represents. */ 576 static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) 577 { 578 unsigned long flags; 579 u32 reg_val; 580 u32 reg_div; 581 582 if (divider_is_fixed(div)) 583 return (u64)div->u.fixed; 584 585 flags = ccu_lock(ccu); 586 reg_val = __ccu_read(ccu, div->u.s.offset); 587 ccu_unlock(ccu, flags); 588 589 /* Extract the full divider field from the register value */ 590 reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); 591 592 /* Return the scaled divisor value it represents */ 593 return scaled_div_value(div, reg_div); 594 } 595 596 /* 597 * Convert a divider's scaled divisor value into its recorded form 598 * and commit it into the hardware divider register. 599 * 600 * Returns 0 on success. Returns -EINVAL for invalid arguments. 601 * Returns -ENXIO if gating failed, and -EIO if a trigger failed. 602 */ 603 static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, 604 struct bcm_clk_div *div, struct bcm_clk_trig *trig) 605 { 606 bool enabled; 607 u32 reg_div; 608 u32 reg_val; 609 int ret = 0; 610 611 BUG_ON(divider_is_fixed(div)); 612 613 /* 614 * If we're just initializing the divider, and no initial 615 * state was defined in the device tree, we just find out 616 * what its current value is rather than updating it. 617 */ 618 if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { 619 reg_val = __ccu_read(ccu, div->u.s.offset); 620 reg_div = bitfield_extract(reg_val, div->u.s.shift, 621 div->u.s.width); 622 div->u.s.scaled_div = scaled_div_value(div, reg_div); 623 624 return 0; 625 } 626 627 /* Convert the scaled divisor to the value we need to record */ 628 reg_div = divider(div, div->u.s.scaled_div); 629 630 /* Clock needs to be enabled before changing the rate */ 631 enabled = __is_clk_gate_enabled(ccu, gate); 632 if (!enabled && !__clk_gate(ccu, gate, true)) { 633 ret = -ENXIO; 634 goto out; 635 } 636 637 /* Replace the divider value and record the result */ 638 reg_val = __ccu_read(ccu, div->u.s.offset); 639 reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, 640 reg_div); 641 __ccu_write(ccu, div->u.s.offset, reg_val); 642 643 /* If the trigger fails we still want to disable the gate */ 644 if (!__clk_trigger(ccu, trig)) 645 ret = -EIO; 646 647 /* Disable the clock again if it was disabled to begin with */ 648 if (!enabled && !__clk_gate(ccu, gate, false)) 649 ret = ret ? ret : -ENXIO; /* return first error */ 650 out: 651 return ret; 652 } 653 654 /* 655 * Initialize a divider by committing our desired state to hardware 656 * without the usual checks to see if it's already set up that way. 657 * Returns true if successful, false otherwise. 658 */ 659 static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, 660 struct bcm_clk_div *div, struct bcm_clk_trig *trig) 661 { 662 if (!divider_exists(div) || divider_is_fixed(div)) 663 return true; 664 return !__div_commit(ccu, gate, div, trig); 665 } 666 667 static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, 668 struct bcm_clk_div *div, struct bcm_clk_trig *trig, 669 u64 scaled_div) 670 { 671 unsigned long flags; 672 u64 previous; 673 int ret; 674 675 BUG_ON(divider_is_fixed(div)); 676 677 previous = div->u.s.scaled_div; 678 if (previous == scaled_div) 679 return 0; /* No change */ 680 681 div->u.s.scaled_div = scaled_div; 682 683 flags = ccu_lock(ccu); 684 __ccu_write_enable(ccu); 685 686 ret = __div_commit(ccu, gate, div, trig); 687 688 __ccu_write_disable(ccu); 689 ccu_unlock(ccu, flags); 690 691 if (ret) 692 div->u.s.scaled_div = previous; /* Revert the change */ 693 694 return ret; 695 696 } 697 698 /* Common clock rate helpers */ 699 700 /* 701 * Implement the common clock framework recalc_rate method, taking 702 * into account a divider and an optional pre-divider. The 703 * pre-divider register pointer may be NULL. 704 */ 705 static unsigned long clk_recalc_rate(struct ccu_data *ccu, 706 struct bcm_clk_div *div, struct bcm_clk_div *pre_div, 707 unsigned long parent_rate) 708 { 709 u64 scaled_parent_rate; 710 u64 scaled_div; 711 u64 result; 712 713 if (!divider_exists(div)) 714 return parent_rate; 715 716 if (parent_rate > (unsigned long)LONG_MAX) 717 return 0; /* actually this would be a caller bug */ 718 719 /* 720 * If there is a pre-divider, divide the scaled parent rate 721 * by the pre-divider value first. In this case--to improve 722 * accuracy--scale the parent rate by *both* the pre-divider 723 * value and the divider before actually computing the 724 * result of the pre-divider. 725 * 726 * If there's only one divider, just scale the parent rate. 727 */ 728 if (pre_div && divider_exists(pre_div)) { 729 u64 scaled_rate; 730 731 scaled_rate = scale_rate(pre_div, parent_rate); 732 scaled_rate = scale_rate(div, scaled_rate); 733 scaled_div = divider_read_scaled(ccu, pre_div); 734 scaled_parent_rate = do_div_round_closest(scaled_rate, 735 scaled_div); 736 } else { 737 scaled_parent_rate = scale_rate(div, parent_rate); 738 } 739 740 /* 741 * Get the scaled divisor value, and divide the scaled 742 * parent rate by that to determine this clock's resulting 743 * rate. 744 */ 745 scaled_div = divider_read_scaled(ccu, div); 746 result = do_div_round_closest(scaled_parent_rate, scaled_div); 747 748 return (unsigned long)result; 749 } 750 751 /* 752 * Compute the output rate produced when a given parent rate is fed 753 * into two dividers. The pre-divider can be NULL, and even if it's 754 * non-null it may be nonexistent. It's also OK for the divider to 755 * be nonexistent, and in that case the pre-divider is also ignored. 756 * 757 * If scaled_div is non-null, it is used to return the scaled divisor 758 * value used by the (downstream) divider to produce that rate. 759 */ 760 static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div, 761 struct bcm_clk_div *pre_div, 762 unsigned long rate, unsigned long parent_rate, 763 u64 *scaled_div) 764 { 765 u64 scaled_parent_rate; 766 u64 min_scaled_div; 767 u64 max_scaled_div; 768 u64 best_scaled_div; 769 u64 result; 770 771 BUG_ON(!divider_exists(div)); 772 BUG_ON(!rate); 773 BUG_ON(parent_rate > (u64)LONG_MAX); 774 775 /* 776 * If there is a pre-divider, divide the scaled parent rate 777 * by the pre-divider value first. In this case--to improve 778 * accuracy--scale the parent rate by *both* the pre-divider 779 * value and the divider before actually computing the 780 * result of the pre-divider. 781 * 782 * If there's only one divider, just scale the parent rate. 783 * 784 * For simplicity we treat the pre-divider as fixed (for now). 785 */ 786 if (divider_exists(pre_div)) { 787 u64 scaled_rate; 788 u64 scaled_pre_div; 789 790 scaled_rate = scale_rate(pre_div, parent_rate); 791 scaled_rate = scale_rate(div, scaled_rate); 792 scaled_pre_div = divider_read_scaled(ccu, pre_div); 793 scaled_parent_rate = do_div_round_closest(scaled_rate, 794 scaled_pre_div); 795 } else { 796 scaled_parent_rate = scale_rate(div, parent_rate); 797 } 798 799 /* 800 * Compute the best possible divider and ensure it is in 801 * range. A fixed divider can't be changed, so just report 802 * the best we can do. 803 */ 804 if (!divider_is_fixed(div)) { 805 best_scaled_div = do_div_round_closest(scaled_parent_rate, 806 rate); 807 min_scaled_div = scaled_div_min(div); 808 max_scaled_div = scaled_div_max(div); 809 if (best_scaled_div > max_scaled_div) 810 best_scaled_div = max_scaled_div; 811 else if (best_scaled_div < min_scaled_div) 812 best_scaled_div = min_scaled_div; 813 } else { 814 best_scaled_div = divider_read_scaled(ccu, div); 815 } 816 817 /* OK, figure out the resulting rate */ 818 result = do_div_round_closest(scaled_parent_rate, best_scaled_div); 819 820 if (scaled_div) 821 *scaled_div = best_scaled_div; 822 823 return (long)result; 824 } 825 826 /* Common clock parent helpers */ 827 828 /* 829 * For a given parent selector (register field) value, find the 830 * index into a selector's parent_sel array that contains it. 831 * Returns the index, or BAD_CLK_INDEX if it's not found. 832 */ 833 static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel) 834 { 835 u8 i; 836 837 BUG_ON(sel->parent_count > (u32)U8_MAX); 838 for (i = 0; i < sel->parent_count; i++) 839 if (sel->parent_sel[i] == parent_sel) 840 return i; 841 return BAD_CLK_INDEX; 842 } 843 844 /* 845 * Fetch the current value of the selector, and translate that into 846 * its corresponding index in the parent array we registered with 847 * the clock framework. 848 * 849 * Returns parent array index that corresponds with the value found, 850 * or BAD_CLK_INDEX if the found value is out of range. 851 */ 852 static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel) 853 { 854 unsigned long flags; 855 u32 reg_val; 856 u32 parent_sel; 857 u8 index; 858 859 /* If there's no selector, there's only one parent */ 860 if (!selector_exists(sel)) 861 return 0; 862 863 /* Get the value in the selector register */ 864 flags = ccu_lock(ccu); 865 reg_val = __ccu_read(ccu, sel->offset); 866 ccu_unlock(ccu, flags); 867 868 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); 869 870 /* Look up that selector's parent array index and return it */ 871 index = parent_index(sel, parent_sel); 872 if (index == BAD_CLK_INDEX) 873 pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n", 874 __func__, parent_sel, ccu->name, sel->offset); 875 876 return index; 877 } 878 879 /* 880 * Commit our desired selector value to the hardware. 881 * 882 * Returns 0 on success. Returns -EINVAL for invalid arguments. 883 * Returns -ENXIO if gating failed, and -EIO if a trigger failed. 884 */ 885 static int 886 __sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, 887 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) 888 { 889 u32 parent_sel; 890 u32 reg_val; 891 bool enabled; 892 int ret = 0; 893 894 BUG_ON(!selector_exists(sel)); 895 896 /* 897 * If we're just initializing the selector, and no initial 898 * state was defined in the device tree, we just find out 899 * what its current value is rather than updating it. 900 */ 901 if (sel->clk_index == BAD_CLK_INDEX) { 902 u8 index; 903 904 reg_val = __ccu_read(ccu, sel->offset); 905 parent_sel = bitfield_extract(reg_val, sel->shift, sel->width); 906 index = parent_index(sel, parent_sel); 907 if (index == BAD_CLK_INDEX) 908 return -EINVAL; 909 sel->clk_index = index; 910 911 return 0; 912 } 913 914 BUG_ON((u32)sel->clk_index >= sel->parent_count); 915 parent_sel = sel->parent_sel[sel->clk_index]; 916 917 /* Clock needs to be enabled before changing the parent */ 918 enabled = __is_clk_gate_enabled(ccu, gate); 919 if (!enabled && !__clk_gate(ccu, gate, true)) 920 return -ENXIO; 921 922 /* Replace the selector value and record the result */ 923 reg_val = __ccu_read(ccu, sel->offset); 924 reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel); 925 __ccu_write(ccu, sel->offset, reg_val); 926 927 /* If the trigger fails we still want to disable the gate */ 928 if (!__clk_trigger(ccu, trig)) 929 ret = -EIO; 930 931 /* Disable the clock again if it was disabled to begin with */ 932 if (!enabled && !__clk_gate(ccu, gate, false)) 933 ret = ret ? ret : -ENXIO; /* return first error */ 934 935 return ret; 936 } 937 938 /* 939 * Initialize a selector by committing our desired state to hardware 940 * without the usual checks to see if it's already set up that way. 941 * Returns true if successful, false otherwise. 942 */ 943 static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate, 944 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig) 945 { 946 if (!selector_exists(sel)) 947 return true; 948 return !__sel_commit(ccu, gate, sel, trig); 949 } 950 951 /* 952 * Write a new value into a selector register to switch to a 953 * different parent clock. Returns 0 on success, or an error code 954 * (from __sel_commit()) otherwise. 955 */ 956 static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, 957 struct bcm_clk_sel *sel, struct bcm_clk_trig *trig, 958 u8 index) 959 { 960 unsigned long flags; 961 u8 previous; 962 int ret; 963 964 previous = sel->clk_index; 965 if (previous == index) 966 return 0; /* No change */ 967 968 sel->clk_index = index; 969 970 flags = ccu_lock(ccu); 971 __ccu_write_enable(ccu); 972 973 ret = __sel_commit(ccu, gate, sel, trig); 974 975 __ccu_write_disable(ccu); 976 ccu_unlock(ccu, flags); 977 978 if (ret) 979 sel->clk_index = previous; /* Revert the change */ 980 981 return ret; 982 } 983 984 /* Clock operations */ 985 986 static int kona_peri_clk_enable(struct clk_hw *hw) 987 { 988 struct kona_clk *bcm_clk = to_kona_clk(hw); 989 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 990 991 return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true); 992 } 993 994 static void kona_peri_clk_disable(struct clk_hw *hw) 995 { 996 struct kona_clk *bcm_clk = to_kona_clk(hw); 997 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 998 999 (void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false); 1000 } 1001 1002 static int kona_peri_clk_is_enabled(struct clk_hw *hw) 1003 { 1004 struct kona_clk *bcm_clk = to_kona_clk(hw); 1005 struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; 1006 1007 return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0; 1008 } 1009 1010 static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, 1011 unsigned long parent_rate) 1012 { 1013 struct kona_clk *bcm_clk = to_kona_clk(hw); 1014 struct peri_clk_data *data = bcm_clk->u.peri; 1015 1016 return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, 1017 parent_rate); 1018 } 1019 1020 static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, 1021 unsigned long *parent_rate) 1022 { 1023 struct kona_clk *bcm_clk = to_kona_clk(hw); 1024 struct bcm_clk_div *div = &bcm_clk->u.peri->div; 1025 1026 if (!divider_exists(div)) 1027 return __clk_get_rate(hw->clk); 1028 1029 /* Quietly avoid a zero rate */ 1030 return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, 1031 rate ? rate : 1, *parent_rate, NULL); 1032 } 1033 1034 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, 1035 unsigned long *best_parent_rate, struct clk **best_parent) 1036 { 1037 struct kona_clk *bcm_clk = to_kona_clk(hw); 1038 struct clk *clk = hw->clk; 1039 struct clk *current_parent; 1040 unsigned long parent_rate; 1041 unsigned long best_delta; 1042 unsigned long best_rate; 1043 u32 parent_count; 1044 u32 which; 1045 1046 /* 1047 * If there is no other parent to choose, use the current one. 1048 * Note: We don't honor (or use) CLK_SET_RATE_NO_REPARENT. 1049 */ 1050 WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT); 1051 parent_count = (u32)bcm_clk->init_data.num_parents; 1052 if (parent_count < 2) 1053 return kona_peri_clk_round_rate(hw, rate, best_parent_rate); 1054 1055 /* Unless we can do better, stick with current parent */ 1056 current_parent = clk_get_parent(clk); 1057 parent_rate = __clk_get_rate(current_parent); 1058 best_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); 1059 best_delta = abs(best_rate - rate); 1060 1061 /* Check whether any other parent clock can produce a better result */ 1062 for (which = 0; which < parent_count; which++) { 1063 struct clk *parent = clk_get_parent_by_index(clk, which); 1064 unsigned long delta; 1065 unsigned long other_rate; 1066 1067 BUG_ON(!parent); 1068 if (parent == current_parent) 1069 continue; 1070 1071 /* We don't support CLK_SET_RATE_PARENT */ 1072 parent_rate = __clk_get_rate(parent); 1073 other_rate = kona_peri_clk_round_rate(hw, rate, &parent_rate); 1074 delta = abs(other_rate - rate); 1075 if (delta < best_delta) { 1076 best_delta = delta; 1077 best_rate = other_rate; 1078 *best_parent = parent; 1079 *best_parent_rate = parent_rate; 1080 } 1081 } 1082 1083 return best_rate; 1084 } 1085 1086 static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) 1087 { 1088 struct kona_clk *bcm_clk = to_kona_clk(hw); 1089 struct peri_clk_data *data = bcm_clk->u.peri; 1090 struct bcm_clk_sel *sel = &data->sel; 1091 struct bcm_clk_trig *trig; 1092 int ret; 1093 1094 BUG_ON(index >= sel->parent_count); 1095 1096 /* If there's only one parent we don't require a selector */ 1097 if (!selector_exists(sel)) 1098 return 0; 1099 1100 /* 1101 * The regular trigger is used by default, but if there's a 1102 * pre-trigger we want to use that instead. 1103 */ 1104 trig = trigger_exists(&data->pre_trig) ? &data->pre_trig 1105 : &data->trig; 1106 1107 ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index); 1108 if (ret == -ENXIO) { 1109 pr_err("%s: gating failure for %s\n", __func__, 1110 bcm_clk->init_data.name); 1111 ret = -EIO; /* Don't proliferate weird errors */ 1112 } else if (ret == -EIO) { 1113 pr_err("%s: %strigger failed for %s\n", __func__, 1114 trig == &data->pre_trig ? "pre-" : "", 1115 bcm_clk->init_data.name); 1116 } 1117 1118 return ret; 1119 } 1120 1121 static u8 kona_peri_clk_get_parent(struct clk_hw *hw) 1122 { 1123 struct kona_clk *bcm_clk = to_kona_clk(hw); 1124 struct peri_clk_data *data = bcm_clk->u.peri; 1125 u8 index; 1126 1127 index = selector_read_index(bcm_clk->ccu, &data->sel); 1128 1129 /* Not all callers would handle an out-of-range value gracefully */ 1130 return index == BAD_CLK_INDEX ? 0 : index; 1131 } 1132 1133 static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, 1134 unsigned long parent_rate) 1135 { 1136 struct kona_clk *bcm_clk = to_kona_clk(hw); 1137 struct peri_clk_data *data = bcm_clk->u.peri; 1138 struct bcm_clk_div *div = &data->div; 1139 u64 scaled_div = 0; 1140 int ret; 1141 1142 if (parent_rate > (unsigned long)LONG_MAX) 1143 return -EINVAL; 1144 1145 if (rate == __clk_get_rate(hw->clk)) 1146 return 0; 1147 1148 if (!divider_exists(div)) 1149 return rate == parent_rate ? 0 : -EINVAL; 1150 1151 /* 1152 * A fixed divider can't be changed. (Nor can a fixed 1153 * pre-divider be, but for now we never actually try to 1154 * change that.) Tolerate a request for a no-op change. 1155 */ 1156 if (divider_is_fixed(&data->div)) 1157 return rate == parent_rate ? 0 : -EINVAL; 1158 1159 /* 1160 * Get the scaled divisor value needed to achieve a clock 1161 * rate as close as possible to what was requested, given 1162 * the parent clock rate supplied. 1163 */ 1164 (void)round_rate(bcm_clk->ccu, div, &data->pre_div, 1165 rate ? rate : 1, parent_rate, &scaled_div); 1166 1167 /* 1168 * We aren't updating any pre-divider at this point, so 1169 * we'll use the regular trigger. 1170 */ 1171 ret = divider_write(bcm_clk->ccu, &data->gate, &data->div, 1172 &data->trig, scaled_div); 1173 if (ret == -ENXIO) { 1174 pr_err("%s: gating failure for %s\n", __func__, 1175 bcm_clk->init_data.name); 1176 ret = -EIO; /* Don't proliferate weird errors */ 1177 } else if (ret == -EIO) { 1178 pr_err("%s: trigger failed for %s\n", __func__, 1179 bcm_clk->init_data.name); 1180 } 1181 1182 return ret; 1183 } 1184 1185 struct clk_ops kona_peri_clk_ops = { 1186 .enable = kona_peri_clk_enable, 1187 .disable = kona_peri_clk_disable, 1188 .is_enabled = kona_peri_clk_is_enabled, 1189 .recalc_rate = kona_peri_clk_recalc_rate, 1190 .determine_rate = kona_peri_clk_determine_rate, 1191 .set_parent = kona_peri_clk_set_parent, 1192 .get_parent = kona_peri_clk_get_parent, 1193 .set_rate = kona_peri_clk_set_rate, 1194 }; 1195 1196 /* Put a peripheral clock into its initial state */ 1197 static bool __peri_clk_init(struct kona_clk *bcm_clk) 1198 { 1199 struct ccu_data *ccu = bcm_clk->ccu; 1200 struct peri_clk_data *peri = bcm_clk->u.peri; 1201 const char *name = bcm_clk->init_data.name; 1202 struct bcm_clk_trig *trig; 1203 1204 BUG_ON(bcm_clk->type != bcm_clk_peri); 1205 1206 if (!policy_init(ccu, &peri->policy)) { 1207 pr_err("%s: error initializing policy for %s\n", 1208 __func__, name); 1209 return false; 1210 } 1211 if (!gate_init(ccu, &peri->gate)) { 1212 pr_err("%s: error initializing gate for %s\n", __func__, name); 1213 return false; 1214 } 1215 if (!hyst_init(ccu, &peri->hyst)) { 1216 pr_err("%s: error initializing hyst for %s\n", __func__, name); 1217 return false; 1218 } 1219 if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) { 1220 pr_err("%s: error initializing divider for %s\n", __func__, 1221 name); 1222 return false; 1223 } 1224 1225 /* 1226 * For the pre-divider and selector, the pre-trigger is used 1227 * if it's present, otherwise we just use the regular trigger. 1228 */ 1229 trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig 1230 : &peri->trig; 1231 1232 if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) { 1233 pr_err("%s: error initializing pre-divider for %s\n", __func__, 1234 name); 1235 return false; 1236 } 1237 1238 if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) { 1239 pr_err("%s: error initializing selector for %s\n", __func__, 1240 name); 1241 return false; 1242 } 1243 1244 return true; 1245 } 1246 1247 static bool __kona_clk_init(struct kona_clk *bcm_clk) 1248 { 1249 switch (bcm_clk->type) { 1250 case bcm_clk_peri: 1251 return __peri_clk_init(bcm_clk); 1252 default: 1253 BUG(); 1254 } 1255 return -EINVAL; 1256 } 1257 1258 /* Set a CCU and all its clocks into their desired initial state */ 1259 bool __init kona_ccu_init(struct ccu_data *ccu) 1260 { 1261 unsigned long flags; 1262 unsigned int which; 1263 struct clk **clks = ccu->clk_data.clks; 1264 bool success = true; 1265 1266 flags = ccu_lock(ccu); 1267 __ccu_write_enable(ccu); 1268 1269 for (which = 0; which < ccu->clk_data.clk_num; which++) { 1270 struct kona_clk *bcm_clk; 1271 1272 if (!clks[which]) 1273 continue; 1274 bcm_clk = to_kona_clk(__clk_get_hw(clks[which])); 1275 success &= __kona_clk_init(bcm_clk); 1276 } 1277 1278 __ccu_write_disable(ccu); 1279 ccu_unlock(ccu, flags); 1280 return success; 1281 } 1282