// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen3 Clock Pulse Generator
 *
 * Copyright (C) 2015-2018 Glider bvba
 *
 * Based on clk-rcar-gen3.c
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#include "renesas-cpg-mssr.h"
#include "rcar-gen3-cpg.h"

#define CPG_PLL0CR		0x00d8
#define CPG_PLL2CR		0x002c
#define CPG_PLL4CR		0x01f4

#define CPG_RCKCR_CKSEL		BIT(15)	/* RCLK Clock Source Select */

static spinlock_t cpg_lock;

static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&cpg_lock, flags);
	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);
	spin_unlock_irqrestore(&cpg_lock, flags);
}

struct cpg_simple_notifier {
	struct notifier_block nb;
	void __iomem *reg;
	u32 saved;
};

static int cpg_simple_notifier_call(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct cpg_simple_notifier *csn =
		container_of(nb, struct cpg_simple_notifier, nb);

	switch (action) {
	case PM_EVENT_SUSPEND:
		csn->saved = readl(csn->reg);
		return NOTIFY_OK;

	case PM_EVENT_RESUME:
		writel(csn->saved, csn->reg);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
					 struct cpg_simple_notifier *csn)
{
	csn->nb.notifier_call = cpg_simple_notifier_call;
	raw_notifier_chain_register(notifiers, &csn->nb);
}

/*
 * Z Clock & Z2 Clock
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.
 *        clk->rate = (parent->rate * mult / 32) / 2
 * parent - fixed parent.  No clk_set_parent support
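 *
 * Worked example (hypothetical parent rate, for illustration only):
 * with parent->rate = 1600 MHz and mult = 20,
 * clk->rate = (1600 MHz * 20 / 32) / 2 = 500 MHz.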
 */
#define CPG_FRQCRB			0x00000004
#define CPG_FRQCRB_KICK			BIT(31)
#define CPG_FRQCRC			0x000000e0
#define CPG_FRQCRC_ZFC_MASK		GENMASK(12, 8)
#define CPG_FRQCRC_Z2FC_MASK		GENMASK(4, 0)

struct cpg_z_clk {
	struct clk_hw hw;
	void __iomem *reg;
	void __iomem *kick_reg;
	unsigned long mask;
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)

static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	u32 val;

	val = readl(zclk->reg) & zclk->mask;
	mult = 32 - (val >> __ffs(zclk->mask));

	/* Factor of 2 is for fixed divider */
	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, 32 * 2);
}

static long cpg_z_clk_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	/* Factor of 2 is for fixed divider */
	unsigned long prate = *parent_rate / 2;
	unsigned int mult;

	mult = div_u64(rate * 32ULL, prate);
	mult = clamp(mult, 1U, 32U);

	return (u64)prate * mult / 32;
}

static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	unsigned int i;

	/* Factor of 2 is for fixed divider */
	mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL * 2, parent_rate);
	mult = clamp(mult, 1U, 32U);

	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
		return -EBUSY;

	cpg_reg_modify(zclk->reg, zclk->mask,
		       ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);

	/*
	 * Set KICK bit in FRQCRB to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);

	/*
	 * Note: There is no HW information about the worst case latency.
	 *
	 * Using experimental measurements, it seems that no more than
	 * ~10 iterations are needed, independently of the CPU rate.
	 * Since this value might be dependent on the external xtal rate,
	 * the pll1 rate or even the other emulation clock rates, use 1000
	 * as a "super" safe value.
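	 *
	 * The loop below therefore just polls the KICK bit up to 1000
	 * times, calling cpu_relax() between reads, and reports
	 * -ETIMEDOUT if the bit never clears.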
	 */
	for (i = 1000; i; i--) {
		if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
			return 0;

		cpu_relax();
	}

	return -ETIMEDOUT;
}

static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.round_rate = cpg_z_clk_round_rate,
	.set_rate = cpg_z_clk_set_rate,
};

static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned long mask)
{
	struct clk_init_data init;
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	zclk->reg = reg + CPG_FRQCRC;
	zclk->kick_reg = reg + CPG_FRQCRB;
	zclk->hw.init = &init;
	zclk->mask = mask;

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk))
		kfree(zclk);

	return clk;
}

/*
 * SDn Clock
 */
#define CPG_SD_STP_HCK		BIT(9)
#define CPG_SD_STP_CK		BIT(8)

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)

#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((stp_ck) ? CPG_SD_STP_CK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}

struct sd_div_table {
	u32 val;
	unsigned int div;
};

struct sd_clock {
	struct clk_hw hw;
	const struct sd_div_table *div_table;
	struct cpg_simple_notifier csn;
	unsigned int div_num;
	unsigned int div_min;
	unsigned int div_max;
	unsigned int cur_div_idx;
};

/* SDn divider
 *                     sd_srcfc   sd_fc   div
 * stp_hck   stp_ck    (div)      (div)     = sd_srcfc x sd_fc
 *-------------------------------------------------------------------
 *  0         0         0 (1)      1 (4)      4 : SDR104 / HS200 / HS400 (8 TAP)
 *  0         0         1 (2)      1 (4)      8 : SDR50
 *  1         0         2 (4)      1 (4)     16 : HS / SDR25
 *  1         0         3 (8)      1 (4)     32 : NS / SDR12
 *  1         0         4 (16)     1 (4)     64
 *  0         0         0 (1)      0 (2)      2
 *  0         0         1 (2)      0 (2)      4 : SDR104 / HS200 / HS400 (4 TAP)
 *  1         0         2 (4)      0 (2)      8
 *  1         0         3 (8)      0 (2)     16
 *  1         0         4 (16)     0 (2)     32
 *
 * NOTE: There is a quirk option to ignore the first row of the dividers
 * table when searching for suitable settings. This is because HS400 on
 * early ES versions of H3 and M3-W requires a specific setting to work.
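 *
 * Worked example (assuming a hypothetical 800 MHz SDn source clock, for
 * illustration only): the div = 4 entry yields 200 MHz (SDR104 / HS200),
 * and the div = 8 entry yields 100 MHz (SDR50).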
 */
static const struct sd_div_table cpg_sd_div_table[] = {
/*	CPG_SD_DIV_TABLE_DATA(stp_hck,  stp_ck,   sd_srcfc,   sd_fc,  sd_div) */
	CPG_SD_DIV_TABLE_DATA(0,        0,        0,          1,        4),
	CPG_SD_DIV_TABLE_DATA(0,        0,        1,          1,        8),
	CPG_SD_DIV_TABLE_DATA(1,        0,        2,          1,       16),
	CPG_SD_DIV_TABLE_DATA(1,        0,        3,          1,       32),
	CPG_SD_DIV_TABLE_DATA(1,        0,        4,          1,       64),
	CPG_SD_DIV_TABLE_DATA(0,        0,        0,          0,        2),
	CPG_SD_DIV_TABLE_DATA(0,        0,        1,          0,        4),
	CPG_SD_DIV_TABLE_DATA(1,        0,        2,          0,        8),
	CPG_SD_DIV_TABLE_DATA(1,        0,        3,          0,       16),
	CPG_SD_DIV_TABLE_DATA(1,        0,        4,          0,       32),
};

#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)

static int cpg_sd_clock_enable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
		       clock->div_table[clock->cur_div_idx].val &
		       CPG_SD_STP_MASK);

	return 0;
}

static void cpg_sd_clock_disable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}

static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}

static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return DIV_ROUND_CLOSEST(parent_rate,
				 clock->div_table[clock->cur_div_idx].div);
}

static unsigned int cpg_sd_clock_calc_div(struct sd_clock *clock,
					  unsigned long rate,
					  unsigned long parent_rate)
{
	unsigned int div;

	if (!rate)
		rate = 1;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);

	return clamp_t(unsigned int, div, clock->div_min, clock->div_max);
}

static long cpg_sd_clock_round_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long *parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int div = cpg_sd_clock_calc_div(clock, rate, *parent_rate);

	return DIV_ROUND_CLOSEST(*parent_rate, div);
}

static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int div = cpg_sd_clock_calc_div(clock, rate, parent_rate);
	unsigned int i;

	for (i = 0; i < clock->div_num; i++)
		if (div == clock->div_table[i].div)
			break;

	if (i >= clock->div_num)
		return -EINVAL;

	clock->cur_div_idx = i;

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
		       clock->div_table[i].val &
		       (CPG_SD_STP_MASK | CPG_SD_FC_MASK));

	return 0;
}

static const struct clk_ops cpg_sd_clock_ops = {
	.enable = cpg_sd_clock_enable,
	.disable = cpg_sd_clock_disable,
	.is_enabled = cpg_sd_clock_is_enabled,
	.recalc_rate = cpg_sd_clock_recalc_rate,
	.round_rate = cpg_sd_clock_round_rate,
	.set_rate = cpg_sd_clock_set_rate,
};

static u32 cpg_quirks __initdata;

#define PLL_ERRATA	BIT(0)		/* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL	BIT(1)		/* Manual RCLK parent selection */
#define SD_SKIP_FIRST	BIT(2)		/* Skip first clock in SD table */
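
/*
 * cpg_quirks is __initdata: it is filled in once by rcar_gen3_cpg_init(),
 * based on the SoC revision matches in cpg_quirks_match[] below, and is
 * only consulted from the __init clock registration paths.
 */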

static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
	void __iomem *base, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct clk_init_data init;
	struct sd_clock *clock;
	struct clk *clk;
	unsigned int i;
	u32 val;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.ops = &cpg_sd_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->csn.reg = base + core->offset;
	clock->hw.init = &init;
	clock->div_table = cpg_sd_div_table;
	clock->div_num = ARRAY_SIZE(cpg_sd_div_table);

	if (cpg_quirks & SD_SKIP_FIRST) {
		clock->div_table++;
		clock->div_num--;
	}

	val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
	val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
	writel(val, clock->csn.reg);

	clock->div_max = clock->div_table[0].div;
	clock->div_min = clock->div_max;
	for (i = 1; i < clock->div_num; i++) {
		clock->div_max = max(clock->div_max, clock->div_table[i].div);
		clock->div_min = min(clock->div_min, clock->div_table[i].div);
	}

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto free_clock;

	cpg_simple_notifier_register(notifiers, &clock->csn);
	return clk;

free_clock:
	kfree(clock);
	return clk;
}

struct rpc_clock {
	struct clk_divider div;
	struct clk_gate gate;
	/*
	 * One notifier covers both RPC and RPCD2 clocks as they are both
	 * controlled by the same RPCCKCR register...
	 */
	struct cpg_simple_notifier csn;
};

static const struct clk_div_table cpg_rpcsrc_div_table[] = {
	{ 2, 5 }, { 3, 6 }, { 0, 0 },
};

static const struct clk_div_table cpg_rpc_div_table[] = {
	{ 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
};

static struct clk * __init cpg_rpc_clk_register(const char *name,
	void __iomem *base, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct rpc_clock *rpc;
	struct clk *clk;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return ERR_PTR(-ENOMEM);

	rpc->div.reg = base + CPG_RPCCKCR;
	rpc->div.width = 3;
	rpc->div.table = cpg_rpc_div_table;
	rpc->div.lock = &cpg_lock;

	rpc->gate.reg = base + CPG_RPCCKCR;
	rpc->gate.bit_idx = 8;
	rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpc->gate.lock = &cpg_lock;

	rpc->csn.reg = base + CPG_RPCCKCR;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpc->div.hw,  &clk_divider_ops,
				     &rpc->gate.hw, &clk_gate_ops, 0);
	if (IS_ERR(clk)) {
		kfree(rpc);
		return clk;
	}

	cpg_simple_notifier_register(notifiers, &rpc->csn);
	return clk;
}

struct rpcd2_clock {
	struct clk_fixed_factor fixed;
	struct clk_gate gate;
};

static struct clk * __init cpg_rpcd2_clk_register(const char *name,
						  void __iomem *base,
						  const char *parent_name)
{
	struct rpcd2_clock *rpcd2;
	struct clk *clk;

	rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
	if (!rpcd2)
		return ERR_PTR(-ENOMEM);

	rpcd2->fixed.mult = 1;
	rpcd2->fixed.div = 2;

	rpcd2->gate.reg = base + CPG_RPCCKCR;
	rpcd2->gate.bit_idx = 9;
	rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpcd2->gate.lock = &cpg_lock;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
				     &rpcd2->gate.hw, &clk_gate_ops, 0);
	if (IS_ERR(clk))
		kfree(rpcd2);

	return clk;
}

static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;

static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
	{
		.soc_id = "r8a7795", .revision = "ES1.0",
		.data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES1.*",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES2.0",
		.data = (void *)SD_SKIP_FIRST,
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.0",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.1",
		.data = (void *)SD_SKIP_FIRST,
	},
	{ /* sentinel */ }
};

struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk *parent;
	unsigned int mult = 1;
	unsigned int div = 1;
	u32 value;

	parent = clks[core->parent & 0xffff];	/* some types use high bits */
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	switch (core->type) {
	case CLK_TYPE_GEN3_MAIN:
		div = cpg_pll_config->extal_div;
		break;

	case CLK_TYPE_GEN3_PLL0:
		/*
		 * PLL0 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL0CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL1:
		mult = cpg_pll_config->pll1_mult;
		div = cpg_pll_config->pll1_div;
		break;

	case CLK_TYPE_GEN3_PLL2:
		/*
		 * PLL2 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL2CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL3:
		mult = cpg_pll_config->pll3_mult;
		div = cpg_pll_config->pll3_div;
		break;

	case CLK_TYPE_GEN3_PLL4:
		/*
		 * PLL4 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL4CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_SD:
		return cpg_sd_clk_register(core, base, __clk_get_name(parent),
					   notifiers);

	case CLK_TYPE_GEN3_R:
		if (cpg_quirks & RCKCR_CKSEL) {
			struct cpg_simple_notifier *csn;

			csn = kzalloc(sizeof(*csn), GFP_KERNEL);
			if (!csn)
				return ERR_PTR(-ENOMEM);

			csn->reg = base + CPG_RCKCR;

			/*
			 * RINT is default.
			 * Only if EXTALR is populated, we switch to it.
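			 *
			 * That is, if the EXTALR input reports a non-zero
			 * rate (typically because the board wires up the
			 * external 32.768 kHz crystal), CPG_RCKCR_CKSEL is
			 * set below so that RCLK is fed from EXTALR rather
			 * than the internal RINT oscillator.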
			 */
			value = readl(csn->reg) & 0x3f;

			if (clk_get_rate(clks[cpg_clk_extalr])) {
				parent = clks[cpg_clk_extalr];
				value |= CPG_RCKCR_CKSEL;
			}

			writel(value, csn->reg);
			cpg_simple_notifier_register(notifiers, csn);
			break;
		}

		/* Select parent clock of RCLK by MD28 */
		if (cpg_mode & BIT(28))
			parent = clks[cpg_clk_extalr];
		break;

	case CLK_TYPE_GEN3_MDSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using a mode pin
		 */
		if (cpg_mode & BIT(core->offset)) {
			div = core->div & 0xffff;
		} else {
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		mult = 1;
		break;

	case CLK_TYPE_GEN3_Z:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, CPG_FRQCRC_ZFC_MASK);

	case CLK_TYPE_GEN3_Z2:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, CPG_FRQCRC_Z2FC_MASK);

	case CLK_TYPE_GEN3_OSC:
		/*
		 * Clock combining OSC EXTAL predivider and a fixed divider
		 */
		div = cpg_pll_config->osc_prediv * core->div;
		break;

	case CLK_TYPE_GEN3_RCKSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using RCKCR.CKSEL
		 */
		if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) {
			div = core->div & 0xffff;
		} else {
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		break;

	case CLK_TYPE_GEN3_RPCSRC:
		return clk_register_divider_table(NULL, core->name,
						  __clk_get_name(parent), 0,
						  base + CPG_RPCCKCR, 3, 2, 0,
						  cpg_rpcsrc_div_table,
						  &cpg_lock);

	case CLK_TYPE_GEN3_RPC:
		return cpg_rpc_clk_register(core->name, base,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN3_RPCD2:
		return cpg_rpcd2_clk_register(core->name, base,
					      __clk_get_name(parent));

	default:
		return ERR_PTR(-EINVAL);
	}

	return clk_register_fixed_factor(NULL, core->name,
					 __clk_get_name(parent), 0, mult, div);
}

int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
			      unsigned int clk_extalr, u32 mode)
{
	const struct soc_device_attribute *attr;

	cpg_pll_config = config;
	cpg_clk_extalr = clk_extalr;
	cpg_mode = mode;
	attr = soc_device_match(cpg_quirks_match);
	if (attr)
		cpg_quirks = (uintptr_t)attr->data;
	pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);

	spin_lock_init(&cpg_lock);

	return 0;
}
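
/*
 * Usage sketch (illustrative only, not compiled here): the helpers in this
 * file are meant to be hooked up by the SoC-specific CPG/MSSR drivers,
 * roughly along the lines below; the exact symbol names on the caller's
 * side are assumptions made for the sake of the example.
 *
 *	// from the SoC driver's init callback:
 *	rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
 *
 *	// in its struct cpg_mssr_info:
 *	.cpg_clk_register = rcar_gen3_cpg_clk_register,
 */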