// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 STMicroelectronics R&D Ltd
 */

/*
 * Authors:
 * Stephen Gallimore <stephen.gallimore@st.com>,
 * Pankaj Dev <pankaj.dev@st.com>.
 */

#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "clkgen.h"

/*
 * Maximum input clock to the PLL before we divide it down by 2,
 * although in practice this has never been observed on real systems.
 */
#define QUADFS_NDIV_THRESHOLD 30000000

#define PLL_BW_GOODREF   (0L)
#define PLL_BW_VBADREF   (1L)
#define PLL_BW_BADREF    (2L)
#define PLL_BW_VGOODREF  (3L)

#define QUADFS_MAX_CHAN 4

struct stm_fs {
	unsigned long ndiv;
	unsigned long mdiv;
	unsigned long pe;
	unsigned long sdiv;
	unsigned long nsdiv;
};
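
/*
 * Note (an illustrative summary derived from the fs660c32 helpers below,
 * not from hardware documentation): all stm_fs members hold raw register
 * values. @ndiv programs the PLL multiplier (fvco = fin * (ndiv + 16)),
 * @mdiv is the 5-bit coarse divider term, @pe the 15-bit fine-tune term,
 * @sdiv the output divider exponent (divide by 2^sdiv) and @nsdiv selects
 * the optional divide-by-3 (0 = divide by 3, 1 = bypass).
 */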

struct clkgen_quadfs_data {
	bool reset_present;
	bool bwfilter_present;
	bool lockstatus_present;
	bool powerup_polarity;
	bool standby_polarity;
	bool nsdiv_present;
	bool nrst_present;
	struct clkgen_field ndiv;
	struct clkgen_field ref_bw;
	struct clkgen_field nreset;
	struct clkgen_field npda;
	struct clkgen_field lock_status;

	struct clkgen_field nrst[QUADFS_MAX_CHAN];
	struct clkgen_field nsb[QUADFS_MAX_CHAN];
	struct clkgen_field en[QUADFS_MAX_CHAN];
	struct clkgen_field mdiv[QUADFS_MAX_CHAN];
	struct clkgen_field pe[QUADFS_MAX_CHAN];
	struct clkgen_field sdiv[QUADFS_MAX_CHAN];
	struct clkgen_field nsdiv[QUADFS_MAX_CHAN];

	const struct clk_ops *pll_ops;
	int (*get_params)(unsigned long, unsigned long, struct stm_fs *);
	int (*get_rate)(unsigned long, const struct stm_fs *, unsigned long *);
};

struct clkgen_clk_out {
	const char *name;
	unsigned long flags;
};

struct clkgen_quadfs_data_clks {
	struct clkgen_quadfs_data *data;
	const struct clkgen_clk_out *outputs;
};

static const struct clk_ops st_quadfs_pll_c32_ops;

static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs);
static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
		unsigned long *);

static const struct clkgen_quadfs_data st_fs660c32_C = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
		  CLKGEN_FIELD(0x2f0, 0x1, 1),
		  CLKGEN_FIELD(0x2f0, 0x1, 2),
		  CLKGEN_FIELD(0x2f0, 0x1, 3) },
	.npda = CLKGEN_FIELD(0x2f0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2f0, 0x1, 8),
		 CLKGEN_FIELD(0x2f0, 0x1, 9),
		 CLKGEN_FIELD(0x2f0, 0x1, 10),
		 CLKGEN_FIELD(0x2f0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x304, 0x1, 24),
		   CLKGEN_FIELD(0x308, 0x1, 24),
		   CLKGEN_FIELD(0x30c, 0x1, 24),
		   CLKGEN_FIELD(0x310, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x304, 0x1f, 15),
		  CLKGEN_FIELD(0x308, 0x1f, 15),
		  CLKGEN_FIELD(0x30c, 0x1f, 15),
		  CLKGEN_FIELD(0x310, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2fc, 0x1, 0),
		CLKGEN_FIELD(0x2fc, 0x1, 1),
		CLKGEN_FIELD(0x2fc, 0x1, 2),
		CLKGEN_FIELD(0x2fc, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2f4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x304, 0x7fff, 0),
		CLKGEN_FIELD(0x308, 0x7fff, 0),
		CLKGEN_FIELD(0x30c, 0x7fff, 0),
		CLKGEN_FIELD(0x310, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x304, 0xf, 20),
		  CLKGEN_FIELD(0x308, 0xf, 20),
		  CLKGEN_FIELD(0x30c, 0xf, 20),
		  CLKGEN_FIELD(0x310, 0xf, 20) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_clk_out st_fs660c32_C_clks[] = {
	{ .name = "clk-s-c0-fs0-ch0", },
	{ .name = "clk-s-c0-fs0-ch1", },
	{ .name = "clk-s-c0-fs0-ch2", },
	{ .name = "clk-s-c0-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_C_data = {
	.data = (struct clkgen_quadfs_data *)&st_fs660c32_C,
	.outputs = st_fs660c32_C_clks,
};

static const struct clkgen_quadfs_data st_fs660c32_D = {
	.nrst_present = true,
	.nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
		  CLKGEN_FIELD(0x2a0, 0x1, 1),
		  CLKGEN_FIELD(0x2a0, 0x1, 2),
		  CLKGEN_FIELD(0x2a0, 0x1, 3) },
	.ndiv = CLKGEN_FIELD(0x2a4, 0x7, 16),
	.pe = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
		CLKGEN_FIELD(0x2b8, 0x7fff, 0),
		CLKGEN_FIELD(0x2bc, 0x7fff, 0),
		CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
	.sdiv = { CLKGEN_FIELD(0x2b4, 0xf, 20),
		  CLKGEN_FIELD(0x2b8, 0xf, 20),
		  CLKGEN_FIELD(0x2bc, 0xf, 20),
		  CLKGEN_FIELD(0x2c0, 0xf, 20) },
	.npda = CLKGEN_FIELD(0x2a0, 0x1, 12),
	.nsb = { CLKGEN_FIELD(0x2a0, 0x1, 8),
		 CLKGEN_FIELD(0x2a0, 0x1, 9),
		 CLKGEN_FIELD(0x2a0, 0x1, 10),
		 CLKGEN_FIELD(0x2a0, 0x1, 11) },
	.nsdiv_present = true,
	.nsdiv = { CLKGEN_FIELD(0x2b4, 0x1, 24),
		   CLKGEN_FIELD(0x2b8, 0x1, 24),
		   CLKGEN_FIELD(0x2bc, 0x1, 24),
		   CLKGEN_FIELD(0x2c0, 0x1, 24) },
	.mdiv = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
		  CLKGEN_FIELD(0x2b8, 0x1f, 15),
		  CLKGEN_FIELD(0x2bc, 0x1f, 15),
		  CLKGEN_FIELD(0x2c0, 0x1f, 15) },
	.en = { CLKGEN_FIELD(0x2ac, 0x1, 0),
		CLKGEN_FIELD(0x2ac, 0x1, 1),
		CLKGEN_FIELD(0x2ac, 0x1, 2),
		CLKGEN_FIELD(0x2ac, 0x1, 3) },
	.lockstatus_present = true,
	.lock_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
	.powerup_polarity = 1,
	.standby_polarity = 1,
	.pll_ops = &st_quadfs_pll_c32_ops,
	.get_params = clk_fs660c32_dig_get_params,
	.get_rate = clk_fs660c32_dig_get_rate,
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D_data = {
	.data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
};

static const struct clkgen_clk_out st_fs660c32_D0_clks[] = {
	{ .name = "clk-s-d0-fs0-ch0", },
	{ .name = "clk-s-d0-fs0-ch1", },
	{ .name = "clk-s-d0-fs0-ch2", },
	{ .name = "clk-s-d0-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D0_data = {
	.data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
	.outputs = st_fs660c32_D0_clks,
};

static const struct clkgen_clk_out st_fs660c32_D2_clks[] = {
	{ .name = "clk-s-d2-fs0-ch0", },
	{ .name = "clk-s-d2-fs0-ch1", },
	{ .name = "clk-s-d2-fs0-ch2", },
	{ .name = "clk-s-d2-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D2_data = {
	.data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
	.outputs = st_fs660c32_D2_clks,
};

static const struct clkgen_clk_out st_fs660c32_D3_clks[] = {
	{ .name = "clk-s-d3-fs0-ch0", },
	{ .name = "clk-s-d3-fs0-ch1", },
	{ .name = "clk-s-d3-fs0-ch2", },
	{ .name = "clk-s-d3-fs0-ch3", },
};

static const struct clkgen_quadfs_data_clks st_fs660c32_D3_data = {
	.data = (struct clkgen_quadfs_data *)&st_fs660c32_D,
	.outputs = st_fs660c32_D3_clks,
};
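
/*
 * How to read the register tables above (a summary based on the
 * CLKGEN_FIELD(offset, mask, shift) helper from clkgen.h, assuming its
 * usual offset/mask/shift meaning): each per-channel entry names the
 * register offset and bit position of one control field. For example, in
 * st_fs660c32_C channel 0 is configured through the register at offset
 * 0x304: pe in bits 14:0 (mask 0x7fff), mdiv in bits 19:15 (mask 0x1f),
 * sdiv in bits 23:20 (mask 0xf) and nsdiv in bit 24.
 */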

/**
 * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control the Fsyn
 * rate - inherits rate from parent. set_rate/round_rate/recalc_rate are functional
 * parent - fixed parent. No clk_set_parent support
 */

/**
 * struct st_clk_quadfs_pll - A pll which outputs a fixed multiplier of
 *                            its parent clock, found inside a type of
 *                            ST quad channel frequency synthesizer block
 *
 * @hw: handle between common and hardware-specific interfaces.
 * @regs_base: base address of the configuration registers.
 * @lock: spinlock.
 * @data: local driver data.
 * @ndiv: cached ndiv value to program into the hardware.
 */
struct st_clk_quadfs_pll {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;
	u32 ndiv;
};

#define to_quadfs_pll(_hw) container_of(_hw, struct st_clk_quadfs_pll, hw)

static int quadfs_pll_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0, timeout = jiffies + msecs_to_jiffies(10);

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/*
	 * Bring block out of reset if we have reset control.
	 */
	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 1);

	/*
	 * Use a fixed input clock noise bandwidth filter for the moment.
	 */
	if (pll->data->bwfilter_present)
		CLKGEN_WRITE(pll, ref_bw, PLL_BW_GOODREF);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	/*
	 * Power up the PLL.
	 */
	CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	if (pll->data->lockstatus_present)
		while (!CLKGEN_READ(pll, lock_status)) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			cpu_relax();
		}

	return 0;
}

static void quadfs_pll_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/*
	 * Powerdown the PLL and then put block into soft reset if we have
	 * reset control.
	 */
	CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);

	if (pll->data->reset_present)
		CLKGEN_WRITE(pll, nreset, 0);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}

static int quadfs_pll_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	u32 npda = CLKGEN_READ(pll, npda);

	return pll->data->powerup_polarity ? !npda : !!npda;
}
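
/*
 * Worked example (illustrative numbers only, derived from the two helpers
 * below rather than from the datasheet): with a 30 MHz input and an ndiv
 * register value of 4, nd = 4 + 16 = 20, so the VCO runs at 600 MHz.
 * clk_fs660c32_vco_get_params() performs the inverse: for a 600 MHz target
 * it computes n = 600000 / 30000 = 20 and stores ndiv = n - 16 = 4,
 * rejecting targets outside 384-660 MHz or inputs above 40 MHz (pdiv = 2
 * is not supported).
 */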

static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs,
		unsigned long *rate)
{
	unsigned long nd = fs->ndiv + 16; /* ndiv value */

	*rate = input * nd;

	return 0;
}

static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	unsigned long rate = 0;
	struct stm_fs params;

	params.ndiv = CLKGEN_READ(pll, ndiv);
	if (clk_fs660c32_vco_get_rate(parent_rate, &params, &rate))
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);

	pll->ndiv = params.ndiv;

	return rate;
}

static int clk_fs660c32_vco_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	/*
	 * Formula:
	 *   VCO frequency = (fin x ndiv) / pdiv
	 *   ndiv = VCOfreq * pdiv / fin
	 */
	unsigned long pdiv = 1, n;

	/* Output clock range: 384 MHz to 660 MHz */
	if (output < 384000000 || output > 660000000)
		return -EINVAL;

	if (input > 40000000)
		/*
		 * This means that PDIV would be 2 instead of 1.
		 * Not supported today.
		 */
		return -EINVAL;

	input /= 1000;
	output /= 1000;

	n = output * pdiv / input;
	if (n < 16)
		n = 16;
	fs->ndiv = n - 16; /* Converting formula value to reg value */

	return 0;
}

static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
		unsigned long rate, unsigned long *prate)
{
	struct stm_fs params;

	if (clk_fs660c32_vco_get_params(*prate, rate, &params))
		return rate;

	clk_fs660c32_vco_get_rate(*prate, &params, &rate);

	pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.ndiv);

	return rate;
}

static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_pll *pll = to_quadfs_pll(hw);
	struct stm_fs params;
	unsigned long hwrate = 0;
	unsigned long flags = 0;
	int ret;

	if (!rate || !parent_rate)
		return -EINVAL;

	ret = clk_fs660c32_vco_get_params(parent_rate, rate, &params);
	if (ret)
		return ret;

	clk_fs660c32_vco_get_rate(parent_rate, &params, &hwrate);

	pr_debug("%s: %s new rate %lu [ndiv=0x%x]\n",
		 __func__, clk_hw_get_name(hw),
		 hwrate, (unsigned int)params.ndiv);

	if (!hwrate)
		return -EINVAL;

	pll->ndiv = params.ndiv;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	CLKGEN_WRITE(pll, ndiv, pll->ndiv);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return 0;
}

static const struct clk_ops st_quadfs_pll_c32_ops = {
	.enable		= quadfs_pll_enable,
	.disable	= quadfs_pll_disable,
	.is_enabled	= quadfs_pll_is_enabled,
	.recalc_rate	= quadfs_pll_fs660c32_recalc_rate,
	.round_rate	= quadfs_pll_fs660c32_round_rate,
	.set_rate	= quadfs_pll_fs660c32_set_rate,
};
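
/*
 * Usage note (informational only): the PLL is exposed to the common clock
 * framework through st_quadfs_pll_c32_ops above, so a consumer call such as
 * clk_set_rate(clk, 600000000) ends up in quadfs_pll_fs660c32_set_rate(),
 * which recomputes ndiv via clk_fs660c32_vco_get_params() and writes it,
 * while quadfs_pll_enable() re-programs the cached ndiv whenever the PLL is
 * powered back up.
 */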

static struct clk * __init st_clk_register_quadfs_pll(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct st_clk_quadfs_pll *pll;
	struct clk *clk;
	struct clk_init_data init;

	/*
	 * Sanity check required pointers.
	 */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = quadfs->pll_ops;
	init.flags = CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll->data = quadfs;
	pll->regs_base = reg;
	pll->lock = lock;
	pll->hw.init = &init;

	clk = clk_register(NULL, &pll->hw);

	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}

/**
 * DOC: A digital frequency synthesizer
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional
 * rate - set rate is functional
 * parent - fixed parent. No clk_set_parent support
 */

/*
 * struct st_clk_quadfs_fsynth - One clock output from a four channel digital
 *                               frequency synthesizer (fsynth) block.
 *
 * @hw: handle between common and hardware-specific interfaces
 *
 * @nsb: regmap field in the output control register for the digital
 *       standby of this fsynth channel. This control is active low so
 *       the channel is in standby when the control bit is cleared.
 *
 * @nsdiv: regmap field in the output control register for the optional
 *         divide by 3 of this fsynth channel. This control is active low
 *         so the divide by 3 is active when the control bit is cleared
 *         and the divide is bypassed when the bit is set.
 */
struct st_clk_quadfs_fsynth {
	struct clk_hw hw;
	void __iomem *regs_base;
	spinlock_t *lock;
	struct clkgen_quadfs_data *data;

	u32 chan;
	/*
	 * Cached hardware values from set_rate so we can program the
	 * hardware in enable. There are two reasons for this:
	 *
	 * 1. The registers may not be writable until the parent has been
	 *    enabled.
	 *
	 * 2. It restores the clock rate when a driver does an enable
	 *    on PM restore, after a suspend to RAM has lost the hardware
	 *    setup.
	 */
	u32 md;
	u32 pe;
	u32 sdiv;
	u32 nsdiv;
};

#define to_quadfs_fsynth(_hw) \
	container_of(_hw, struct st_clk_quadfs_fsynth, hw)

static void quadfs_fsynth_program_enable(struct st_clk_quadfs_fsynth *fs)
{
	/*
	 * Pulse the program enable register lsb to make the hardware take
	 * notice of the new md/pe values with a glitchless transition.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 1);
	CLKGEN_WRITE(fs, en[fs->chan], 0);
}

static void quadfs_fsynth_program_rate(struct st_clk_quadfs_fsynth *fs)
{
	unsigned long flags = 0;

	/*
	 * Ensure the md/pe parameters are ignored while we are
	 * reprogramming them so we can get a glitchless change
	 * when fine tuning the speed of a running clock.
	 */
	CLKGEN_WRITE(fs, en[fs->chan], 0);

	CLKGEN_WRITE(fs, mdiv[fs->chan], fs->md);
	CLKGEN_WRITE(fs, pe[fs->chan], fs->pe);
	CLKGEN_WRITE(fs, sdiv[fs->chan], fs->sdiv);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	if (fs->data->nsdiv_present)
		CLKGEN_WRITE(fs, nsdiv[fs->chan], fs->nsdiv);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_enable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	quadfs_fsynth_program_rate(fs);

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);

	if (fs->data->nrst_present)
		CLKGEN_WRITE(fs, nrst[fs->chan], 0);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);

	quadfs_fsynth_program_enable(fs);

	return 0;
}

static void quadfs_fsynth_disable(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long flags = 0;

	pr_debug("%s: %s\n", __func__, clk_hw_get_name(hw));

	if (fs->lock)
		spin_lock_irqsave(fs->lock, flags);

	CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);

	if (fs->lock)
		spin_unlock_irqrestore(fs->lock, flags);
}

static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	u32 nsb = CLKGEN_READ(fs, nsb[fs->chan]);

	pr_debug("%s: %s enable bit = 0x%x\n",
		 __func__, clk_hw_get_name(hw), nsb);

	return fs->data->standby_polarity ? !nsb : !!nsb;
}
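
/*
 * For reference (an equivalent rewrite of the arithmetic in
 * clk_fs660c32_dig_get_rate() below, not an additional datasheet formula):
 *
 *   Fout = (Fin * 32) / ((32 + md + pe / 2^15) * 2^sdiv * ns)
 *
 * where md, pe and sdiv are the raw register values and ns is 3 when the
 * nsdiv bit is 0 (divide-by-3 active) or 1 when the bit is set (bypass).
 */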

#define P20		(uint64_t)(1 << 20)

static int clk_fs660c32_dig_get_rate(unsigned long input,
		const struct stm_fs *fs, unsigned long *rate)
{
	unsigned long s = (1 << fs->sdiv);
	unsigned long ns;
	uint64_t res;

	/*
	 * 'nsdiv' is a register value ('BIN') which is translated
	 * to a decimal value according to the following rules.
	 *
	 *     nsdiv      ns.dec
	 *       0          3
	 *       1          1
	 */
	ns = (fs->nsdiv == 1) ? 1 : 3;

	res = (P20 * (32 + fs->mdiv) + 32 * fs->pe) * s * ns;
	*rate = (unsigned long)div64_u64(input * P20 * 32, res);

	return 0;
}

static int clk_fs660c32_get_pe(int m, int si, unsigned long *deviation,
		signed long input, unsigned long output, uint64_t *p,
		struct stm_fs *fs)
{
	unsigned long new_freq, new_deviation;
	struct stm_fs fs_tmp;
	uint64_t val;

	val = (uint64_t)output << si;

	*p = (uint64_t)input * P20 - (32LL + (uint64_t)m) * val * (P20 / 32LL);

	*p = div64_u64(*p, val);

	if (*p > 32767LL)
		return 1;

	fs_tmp.mdiv = (unsigned long)m;
	fs_tmp.pe = (unsigned long)*p;
	fs_tmp.sdiv = si;
	fs_tmp.nsdiv = 1;

	clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

	new_deviation = abs(output - new_freq);

	if (new_deviation < *deviation) {
		fs->mdiv = m;
		fs->pe = (unsigned long)*p;
		fs->sdiv = si;
		fs->nsdiv = 1;
		*deviation = new_deviation;
	}
	return 0;
}

static int clk_fs660c32_dig_get_params(unsigned long input,
		unsigned long output, struct stm_fs *fs)
{
	int si;	/* sdiv_reg (8 downto 0) */
	int m;	/* md value */
	unsigned long new_freq, new_deviation;
	/* initial condition to say: "infinite deviation" */
	unsigned long deviation = ~0;
	uint64_t p, p1, p2;	/* pe value */
	int r1, r2;

	struct stm_fs fs_tmp;

	for (si = 0; (si <= 8) && deviation; si++) {

		/* Boundary test to avoid useless iteration */
		r1 = clk_fs660c32_get_pe(0, si, &deviation,
				input, output, &p1, fs);
		r2 = clk_fs660c32_get_pe(31, si, &deviation,
				input, output, &p2, fs);

		/* No solution */
		if (r1 && r2 && (p1 > p2))
			continue;

		/* Try to find best deviation */
		for (m = 1; (m < 31) && deviation; m++)
			clk_fs660c32_get_pe(m, si, &deviation,
					input, output, &p, fs);

	}

	if (deviation == ~0) /* No solution found */
		return -1;

	/* pe fine tuning if deviation not 0: +/- 2 around computed pe value */
	if (deviation) {
		fs_tmp.mdiv = fs->mdiv;
		fs_tmp.sdiv = fs->sdiv;
		fs_tmp.nsdiv = fs->nsdiv;

		if (fs->pe > 2)
			p2 = fs->pe - 2;
		else
			p2 = 0;

		for (; p2 < 32768LL && (p2 <= (fs->pe + 2)); p2++) {
			fs_tmp.pe = (unsigned long)p2;

			clk_fs660c32_dig_get_rate(input, &fs_tmp, &new_freq);

			new_deviation = abs(output - new_freq);

			/* Check if this is a better solution */
			if (new_deviation < deviation) {
				fs->pe = (unsigned long)p2;
				deviation = new_deviation;
			}
		}
	}
	return 0;
}
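
/*
 * Summary of the search implemented by clk_fs660c32_dig_get_params() above
 * (descriptive only): for each sdiv setting (0..8) it first probes the mdiv
 * boundary values 0 and 31 to skip hopeless ranges, then sweeps mdiv 1..30,
 * computing pe analytically for each candidate and keeping the combination
 * with the smallest deviation from the requested rate. If the best match is
 * still not exact, pe is fine-tuned by trying values within +/- 2 of the
 * computed one.
 */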

static int quadfs_fsynt_get_hw_value_for_recalc(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	/*
	 * Get the initial hardware values for recalc_rate.
	 */
	params->mdiv = CLKGEN_READ(fs, mdiv[fs->chan]);
	params->pe = CLKGEN_READ(fs, pe[fs->chan]);
	params->sdiv = CLKGEN_READ(fs, sdiv[fs->chan]);

	if (fs->data->nsdiv_present)
		params->nsdiv = CLKGEN_READ(fs, nsdiv[fs->chan]);
	else
		params->nsdiv = 1;

	/*
	 * If all of the fields are zero then assume no clock rate has been
	 * programmed yet.
	 */
	if (!params->mdiv && !params->pe && !params->sdiv)
		return 1;

	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	return 0;
}

static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate, struct stm_fs *params)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);
	int (*clk_fs_get_params)(unsigned long, unsigned long, struct stm_fs *);
	unsigned long rate = 0;

	clk_fs_get_rate = fs->data->get_rate;
	clk_fs_get_params = fs->data->get_params;

	if (!clk_fs_get_params(prate, drate, params))
		clk_fs_get_rate(prate, params, &rate);

	return rate;
}

static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	unsigned long rate = 0;
	struct stm_fs params;
	int (*clk_fs_get_rate)(unsigned long,
			const struct stm_fs *, unsigned long *);

	clk_fs_get_rate = fs->data->get_rate;

	if (quadfs_fsynt_get_hw_value_for_recalc(fs, &params))
		return 0;

	if (clk_fs_get_rate(parent_rate, &params, &rate)) {
		pr_err("%s:%s error calculating rate\n",
		       clk_hw_get_name(hw), __func__);
	}

	pr_debug("%s:%s rate %lu\n", clk_hw_get_name(hw), __func__, rate);

	return rate;
}

static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	struct stm_fs params;

	rate = quadfs_find_best_rate(hw, rate, *prate, &params);

	pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
		 __func__, clk_hw_get_name(hw),
		 rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
		 (unsigned int)params.pe, (unsigned int)params.nsdiv);

	return rate;
}
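
/*
 * Note (descriptive only): quadfs_round_rate() and quadfs_set_rate() both go
 * through quadfs_find_best_rate(), so the rate reported by clk_round_rate()
 * is exactly the rate that a subsequent clk_set_rate() will program.
 */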

static void quadfs_program_and_enable(struct st_clk_quadfs_fsynth *fs,
		struct stm_fs *params)
{
	fs->md = params->mdiv;
	fs->pe = params->pe;
	fs->sdiv = params->sdiv;
	fs->nsdiv = params->nsdiv;

	/*
	 * In some integrations you can only change the fsynth programming when
	 * the parent entity containing it is enabled.
	 */
	quadfs_fsynth_program_rate(fs);
	quadfs_fsynth_program_enable(fs);
}

static int quadfs_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
	struct stm_fs params;
	long hwrate;

	if (!rate || !parent_rate)
		return -EINVAL;

	memset(&params, 0, sizeof(struct stm_fs));

	hwrate = quadfs_find_best_rate(hw, rate, parent_rate, &params);
	if (!hwrate)
		return -EINVAL;

	quadfs_program_and_enable(fs, &params);

	return 0;
}

static const struct clk_ops st_quadfs_ops = {
	.enable		= quadfs_fsynth_enable,
	.disable	= quadfs_fsynth_disable,
	.is_enabled	= quadfs_fsynth_is_enabled,
	.round_rate	= quadfs_round_rate,
	.set_rate	= quadfs_set_rate,
	.recalc_rate	= quadfs_recalc_rate,
};

static struct clk * __init st_clk_register_quadfs_fsynth(
		const char *name, const char *parent_name,
		struct clkgen_quadfs_data *quadfs, void __iomem *reg, u32 chan,
		unsigned long flags, spinlock_t *lock)
{
	struct st_clk_quadfs_fsynth *fs;
	struct clk *clk;
	struct clk_init_data init;

	/*
	 * Sanity check required pointers, note that nsdiv3 is optional.
	 */
	if (WARN_ON(!name || !parent_name))
		return ERR_PTR(-EINVAL);

	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &st_quadfs_ops;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	fs->data = quadfs;
	fs->regs_base = reg;
	fs->chan = chan;
	fs->lock = lock;
	fs->hw.init = &init;

	clk = clk_register(NULL, &fs->hw);

	if (IS_ERR(clk))
		kfree(fs);

	return clk;
}
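
/*
 * Illustrative device tree usage (a sketch inferred from the setup code
 * below, not copied from the binding document; the node name, address and
 * input clock label are placeholders): the node supplies a register window
 * (directly or via its parent node), the input clock, one output name per
 * channel and #clock-cells = <1> so consumers can index the four channels
 * through of_clk_src_onecell_get():
 *
 *	quadfs_example: quadfs@0 {
 *		compatible = "st,quadfs";
 *		reg = <0x0 0x1000>;
 *		#clock-cells = <1>;
 *		clocks = <&clk_sysin>;
 *		clock-output-names = "clk-s-d0-fs0-ch0", "clk-s-d0-fs0-ch1",
 *				     "clk-s-d0-fs0-ch2", "clk-s-d0-fs0-ch3";
 *	};
 */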

static void __init st_of_create_quadfs_fsynths(
		struct device_node *np, const char *pll_name,
		struct clkgen_quadfs_data_clks *quadfs, void __iomem *reg,
		spinlock_t *lock)
{
	struct clk_onecell_data *clk_data;
	int fschan;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		return;

	clk_data->clk_num = QUADFS_MAX_CHAN;
	clk_data->clks = kcalloc(QUADFS_MAX_CHAN, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks) {
		kfree(clk_data);
		return;
	}

	for (fschan = 0; fschan < QUADFS_MAX_CHAN; fschan++) {
		struct clk *clk;
		const char *clk_name;
		unsigned long flags = 0;

		if (quadfs->outputs) {
			clk_name = quadfs->outputs[fschan].name;
			flags = quadfs->outputs[fschan].flags;
		} else {
			if (of_property_read_string_index(np,
							  "clock-output-names",
							  fschan, &clk_name))
				break;
			of_clk_detect_critical(np, fschan, &flags);
		}

		/*
		 * If we read an empty clock name then the channel is unused.
		 */
		if (*clk_name == '\0')
			continue;

		clk = st_clk_register_quadfs_fsynth(clk_name, pll_name,
						    quadfs->data, reg, fschan,
						    flags, lock);

		/*
		 * If registering this clock output failed, skip it and move
		 * on to the next channel.
		 */
		if (!IS_ERR(clk)) {
			clk_data->clks[fschan] = clk;
			pr_debug("%s: parent %s rate %u\n",
				 __clk_get_name(clk),
				 __clk_get_name(clk_get_parent(clk)),
				 (unsigned int)clk_get_rate(clk));
		}
	}

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
}

static void __init st_of_quadfs_setup(struct device_node *np,
		struct clkgen_quadfs_data_clks *datac)
{
	struct clk *clk;
	const char *pll_name, *clk_parent_name;
	void __iomem *reg;
	spinlock_t *lock;
	struct device_node *parent_np;

	/*
	 * First check for a reg property within the node itself to keep
	 * backward compatibility, then fall back to the parent node's reg
	 * property if it doesn't exist.
	 */
	reg = of_iomap(np, 0);
	if (!reg) {
		parent_np = of_get_parent(np);
		reg = of_iomap(parent_np, 0);
		of_node_put(parent_np);
		if (!reg) {
			pr_err("%s: Failed to get base address\n", __func__);
			return;
		}
	}

	clk_parent_name = of_clk_get_parent_name(np, 0);
	if (!clk_parent_name)
		return;

	pll_name = kasprintf(GFP_KERNEL, "%pOFn.pll", np);
	if (!pll_name)
		return;

	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
	if (!lock)
		goto err_exit;

	spin_lock_init(lock);

	clk = st_clk_register_quadfs_pll(pll_name, clk_parent_name, datac->data,
					 reg, lock);
	if (IS_ERR(clk))
		goto err_exit;
	else
		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

	st_of_create_quadfs_fsynths(np, pll_name, datac, reg, lock);

err_exit:
	kfree(pll_name); /* No longer need local copy of the PLL name */
}

static void __init st_of_quadfs660C_setup(struct device_node *np)
{
	st_of_quadfs_setup(np,
			(struct clkgen_quadfs_data_clks *)&st_fs660c32_C_data);
}
CLK_OF_DECLARE(quadfs660C, "st,quadfs-pll", st_of_quadfs660C_setup);

static void __init st_of_quadfs660D_setup(struct device_node *np)
{
	st_of_quadfs_setup(np,
			(struct clkgen_quadfs_data_clks *)&st_fs660c32_D_data);
}
CLK_OF_DECLARE(quadfs660D, "st,quadfs", st_of_quadfs660D_setup);

static void __init st_of_quadfs660D0_setup(struct device_node *np)
{
	st_of_quadfs_setup(np,
			(struct clkgen_quadfs_data_clks *)&st_fs660c32_D0_data);
}
CLK_OF_DECLARE(quadfs660D0, "st,quadfs-d0", st_of_quadfs660D0_setup);

static void __init st_of_quadfs660D2_setup(struct device_node *np)
{
	st_of_quadfs_setup(np,
			(struct clkgen_quadfs_data_clks *)&st_fs660c32_D2_data);
}
CLK_OF_DECLARE(quadfs660D2, "st,quadfs-d2", st_of_quadfs660D2_setup);

static void __init st_of_quadfs660D3_setup(struct device_node *np)
{
	st_of_quadfs_setup(np,
			(struct clkgen_quadfs_data_clks *)&st_fs660c32_D3_data);
}
CLK_OF_DECLARE(quadfs660D3, "st,quadfs-d3", st_of_quadfs660D3_setup);