// SPDX-License-Identifier: GPL-2.0
/*
 * R-Car Gen3 Clock Pulse Generator
 *
 * Copyright (C) 2015-2018 Glider bvba
 * Copyright (C) 2019 Renesas Electronics Corp.
 *
 * Based on clk-rcar-gen3.c
 *
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#include "renesas-cpg-mssr.h"
#include "rcar-gen3-cpg.h"

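/*
 * PLL0, PLL2 and PLL4 report their current multiplier in bits [30:24] of
 * the PLLnCR registers below; rcar_gen3_cpg_clk_register() derives
 * mult = (field + 1) * 2 from that value.
 */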
#define CPG_PLL0CR		0x00d8
#define CPG_PLL2CR		0x002c
#define CPG_PLL4CR		0x01f4

#define CPG_RCKCR_CKSEL	BIT(15)	/* RCLK Clock Source Select */

static spinlock_t cpg_lock;

static void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&cpg_lock, flags);
	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);
	spin_unlock_irqrestore(&cpg_lock, flags);
}

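/*
 * Simple suspend/resume helper: saves the contents of a single CPG register
 * on PM_EVENT_SUSPEND and writes it back on PM_EVENT_RESUME.
 */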
struct cpg_simple_notifier {
	struct notifier_block nb;
	void __iomem *reg;
	u32 saved;
};

static int cpg_simple_notifier_call(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct cpg_simple_notifier *csn =
		container_of(nb, struct cpg_simple_notifier, nb);

	switch (action) {
	case PM_EVENT_SUSPEND:
		csn->saved = readl(csn->reg);
		return NOTIFY_OK;

	case PM_EVENT_RESUME:
		writel(csn->saved, csn->reg);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

static void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
					 struct cpg_simple_notifier *csn)
{
	csn->nb.notifier_call = cpg_simple_notifier_call;
	raw_notifier_chain_register(notifiers, &csn->nb);
}

/*
 * Z Clock & Z2 Clock
 *
 * Traits of this clock:
 * prepare - clk_prepare only ensures that parents are prepared
 * enable - clk_enable only ensures that parents are enabled
 * rate - rate is adjustable.
 *        clk->rate = (parent->rate * mult / 32) / fixed_div
 *        (fixed_div is 2 for the Z and Z2 clocks)
 * parent - fixed parent.  No clk_set_parent support
 */
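/*
 * Worked example, assuming fixed_div = 2: with a 3 GHz parent, mult = 32
 * gives 3 GHz * 32/32 / 2 = 1.5 GHz, and mult = 24 gives 1.125 GHz.
 */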
#define CPG_FRQCRB			0x00000004
#define CPG_FRQCRB_KICK			BIT(31)
#define CPG_FRQCRC			0x000000e0

struct cpg_z_clk {
	struct clk_hw hw;
	void __iomem *reg;
	void __iomem *kick_reg;
	unsigned long mask;
	unsigned int fixed_div;
};

#define to_z_clk(_hw)	container_of(_hw, struct cpg_z_clk, hw)

static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	u32 val;

	val = readl(zclk->reg) & zclk->mask;
	mult = 32 - (val >> __ffs(zclk->mask));

	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
				     32 * zclk->fixed_div);
}

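/*
 * Convert the requested rate into a multiplier out of 32, clamp it to the
 * range allowed by the request's min_rate/max_rate constraints, and report
 * back the rate that multiplier actually produces.
 */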
static int cpg_z_clk_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int min_mult, max_mult, mult;
	unsigned long prate;

	prate = req->best_parent_rate / zclk->fixed_div;
	min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
	max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
	if (max_mult < min_mult)
		return -EINVAL;

	mult = div64_ul(req->rate * 32ULL, prate);
	mult = clamp(mult, min_mult, max_mult);

	req->rate = div_u64((u64)prate * mult, 32);
	return 0;
}

static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct cpg_z_clk *zclk = to_z_clk(hw);
	unsigned int mult;
	unsigned int i;

	mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
				       parent_rate);
	mult = clamp(mult, 1U, 32U);

	if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
		return -EBUSY;

	cpg_reg_modify(zclk->reg, zclk->mask,
		       ((32 - mult) << __ffs(zclk->mask)) & zclk->mask);

	/*
	 * Set KICK bit in FRQCRB to update hardware setting and wait for
	 * clock change completion.
	 */
	cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);

	/*
	 * Note: there is no hardware information about the worst-case latency.
	 *
	 * Experimental measurements suggest that no more than ~10 iterations
	 * are needed, independently of the CPU rate.  Since the number may
	 * depend on the external xtal rate, the PLL1 rate, or other clock
	 * rates, use 1000 iterations as a very safe upper bound.
	 */
	for (i = 1000; i; i--) {
		if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
			return 0;

		cpu_relax();
	}

	return -ETIMEDOUT;
}

static const struct clk_ops cpg_z_clk_ops = {
	.recalc_rate = cpg_z_clk_recalc_rate,
	.determine_rate = cpg_z_clk_determine_rate,
	.set_rate = cpg_z_clk_set_rate,
};

static struct clk * __init cpg_z_clk_register(const char *name,
					      const char *parent_name,
					      void __iomem *reg,
					      unsigned int div,
					      unsigned int offset)
{
	struct clk_init_data init;
	struct cpg_z_clk *zclk;
	struct clk *clk;

	zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
	if (!zclk)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_z_clk_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	zclk->reg = reg + CPG_FRQCRC;
	zclk->kick_reg = reg + CPG_FRQCRB;
	zclk->hw.init = &init;
	zclk->mask = GENMASK(offset + 4, offset);
	zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */

	clk = clk_register(NULL, &zclk->hw);
	if (IS_ERR(clk))
		kfree(zclk);

	return clk;
}

/*
 * SDn Clock
 */
#define CPG_SD_STP_HCK		BIT(9)
#define CPG_SD_STP_CK		BIT(8)

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)
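/*
 * Field layout used above: STP_HCK (bit 9) and STP_CK (bit 8) stop the
 * clocks, SD_SRCFC lives in bits [4:2] and SD_FC in bits [1:0], as encoded
 * by CPG_SD_DIV_TABLE_DATA() below.
 */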

#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}

struct sd_div_table {
	u32 val;
	unsigned int div;
};

struct sd_clock {
	struct clk_hw hw;
	const struct sd_div_table *div_table;
	struct cpg_simple_notifier csn;
	unsigned int div_num;
	unsigned int cur_div_idx;
};

/* SDn divider
 *           sd_srcfc   sd_fc   div
 * stp_hck   (div)      (div)     = sd_srcfc x sd_fc
 *---------------------------------------------------------
 *  0         0 (1)      1 (4)      4 : SDR104 / HS200 / HS400 (8 TAP)
 *  0         1 (2)      1 (4)      8 : SDR50
 *  1         2 (4)      1 (4)     16 : HS / SDR25
 *  1         3 (8)      1 (4)     32 : NS / SDR12
 *  1         4 (16)     1 (4)     64
 *  0         0 (1)      0 (2)      2
 *  0         1 (2)      0 (2)      4 : SDR104 / HS200 / HS400 (4 TAP)
 *  1         2 (4)      0 (2)      8
 *  1         3 (8)      0 (2)     16
 *  1         4 (16)     0 (2)     32
 *
 *  NOTE: There is a quirk option to ignore the first row of the dividers
 *  table when searching for suitable settings. This is because HS400 on
 *  early ES versions of H3 and M3-W requires a specific setting to work.
 */
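/*
 * For example, with a hypothetical 800 MHz SDn source clock, the div 4
 * entries yield 200 MHz and the div 8 entries yield 100 MHz.
 */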
static const struct sd_div_table cpg_sd_div_table[] = {
/*	CPG_SD_DIV_TABLE_DATA(stp_hck,  sd_srcfc,   sd_fc,  sd_div) */
	CPG_SD_DIV_TABLE_DATA(0,        0,          1,        4),
	CPG_SD_DIV_TABLE_DATA(0,        1,          1,        8),
	CPG_SD_DIV_TABLE_DATA(1,        2,          1,       16),
	CPG_SD_DIV_TABLE_DATA(1,        3,          1,       32),
	CPG_SD_DIV_TABLE_DATA(1,        4,          1,       64),
	CPG_SD_DIV_TABLE_DATA(0,        0,          0,        2),
	CPG_SD_DIV_TABLE_DATA(0,        1,          0,        4),
	CPG_SD_DIV_TABLE_DATA(1,        2,          0,        8),
	CPG_SD_DIV_TABLE_DATA(1,        3,          0,       16),
	CPG_SD_DIV_TABLE_DATA(1,        4,          0,       32),
};

#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)

static int cpg_sd_clock_enable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
		       clock->div_table[clock->cur_div_idx].val &
		       CPG_SD_STP_MASK);

	return 0;
}

static void cpg_sd_clock_disable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}

static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}

static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return DIV_ROUND_CLOSEST(parent_rate,
				 clock->div_table[clock->cur_div_idx].div);
}

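/*
 * Pick the divider whose resulting rate is closest to the requested rate
 * while still falling within the request's min_rate/max_rate window.
 */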
static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned long calc_rate, diff;
	unsigned int i;

	for (i = 0; i < clock->div_num; i++) {
		calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
					      clock->div_table[i].div);
		if (calc_rate < req->min_rate || calc_rate > req->max_rate)
			continue;

		diff = calc_rate > req->rate ? calc_rate - req->rate
					     : req->rate - calc_rate;
		if (diff < diff_min) {
			best_rate = calc_rate;
			diff_min = diff;
		}
	}

	if (best_rate == ULONG_MAX)
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int i;

	for (i = 0; i < clock->div_num; i++)
		if (rate == DIV_ROUND_CLOSEST(parent_rate,
					      clock->div_table[i].div))
			break;

	if (i >= clock->div_num)
		return -EINVAL;

	clock->cur_div_idx = i;

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
		       clock->div_table[i].val &
		       (CPG_SD_STP_MASK | CPG_SD_FC_MASK));

	return 0;
}

static const struct clk_ops cpg_sd_clock_ops = {
	.enable = cpg_sd_clock_enable,
	.disable = cpg_sd_clock_disable,
	.is_enabled = cpg_sd_clock_is_enabled,
	.recalc_rate = cpg_sd_clock_recalc_rate,
	.determine_rate = cpg_sd_clock_determine_rate,
	.set_rate = cpg_sd_clock_set_rate,
};

static u32 cpg_quirks __initdata;

#define PLL_ERRATA	BIT(0)		/* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL	BIT(1)		/* Manual RCLK parent selection */
#define SD_SKIP_FIRST	BIT(2)		/* Skip first clock in SD table */

static struct clk * __init cpg_sd_clk_register(const char *name,
	void __iomem *base, unsigned int offset, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct clk_init_data init;
	struct sd_clock *clock;
	struct clk *clk;
	u32 val;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_sd_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->csn.reg = base + offset;
	clock->hw.init = &init;
	clock->div_table = cpg_sd_div_table;
	clock->div_num = ARRAY_SIZE(cpg_sd_div_table);

	if (cpg_quirks & SD_SKIP_FIRST) {
		clock->div_table++;
		clock->div_num--;
	}

	val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
	val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
	writel(val, clock->csn.reg);

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto free_clock;

	cpg_simple_notifier_register(notifiers, &clock->csn);
	return clk;

free_clock:
	kfree(clock);
	return clk;
}

struct rpc_clock {
	struct clk_divider div;
	struct clk_gate gate;
	/*
	 * One notifier covers both RPC and RPCD2 clocks as they are both
	 * controlled by the same RPCCKCR register...
	 */
	struct cpg_simple_notifier csn;
};

static const struct clk_div_table cpg_rpcsrc_div_table[] = {
	{ 2, 5 }, { 3, 6 }, { 0, 0 },
};

static const struct clk_div_table cpg_rpc_div_table[] = {
	{ 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
};

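/*
 * Bits of RPCCKCR touched in this file: the RPC divider in bits [2:0]
 * (cpg_rpc_div_table), the RPCSRC divider in bits [4:3]
 * (cpg_rpcsrc_div_table), and the stop bits 8 (RPC) and 9 (RPCD2), both
 * registered with CLK_GATE_SET_TO_DISABLE.
 */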
static struct clk * __init cpg_rpc_clk_register(const char *name,
	void __iomem *base, const char *parent_name,
	struct raw_notifier_head *notifiers)
{
	struct rpc_clock *rpc;
	struct clk *clk;

	rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
	if (!rpc)
		return ERR_PTR(-ENOMEM);

	rpc->div.reg = base + CPG_RPCCKCR;
	rpc->div.width = 3;
	rpc->div.table = cpg_rpc_div_table;
	rpc->div.lock = &cpg_lock;

	rpc->gate.reg = base + CPG_RPCCKCR;
	rpc->gate.bit_idx = 8;
	rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpc->gate.lock = &cpg_lock;

	rpc->csn.reg = base + CPG_RPCCKCR;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpc->div.hw,  &clk_divider_ops,
				     &rpc->gate.hw, &clk_gate_ops,
				     CLK_SET_RATE_PARENT);
	if (IS_ERR(clk)) {
		kfree(rpc);
		return clk;
	}

	cpg_simple_notifier_register(notifiers, &rpc->csn);
	return clk;
}

struct rpcd2_clock {
	struct clk_fixed_factor fixed;
	struct clk_gate gate;
};

static struct clk * __init cpg_rpcd2_clk_register(const char *name,
						  void __iomem *base,
						  const char *parent_name)
{
	struct rpcd2_clock *rpcd2;
	struct clk *clk;

	rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
	if (!rpcd2)
		return ERR_PTR(-ENOMEM);

	rpcd2->fixed.mult = 1;
	rpcd2->fixed.div = 2;

	rpcd2->gate.reg = base + CPG_RPCCKCR;
	rpcd2->gate.bit_idx = 9;
	rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
	rpcd2->gate.lock = &cpg_lock;

	clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
				     &rpcd2->fixed.hw, &clk_fixed_factor_ops,
				     &rpcd2->gate.hw, &clk_gate_ops,
				     CLK_SET_RATE_PARENT);
	if (IS_ERR(clk))
		kfree(rpcd2);

	return clk;
}

static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;

static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
	{
		.soc_id = "r8a7795", .revision = "ES1.0",
		.data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES1.*",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7795", .revision = "ES2.0",
		.data = (void *)SD_SKIP_FIRST,
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.0",
		.data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
	},
	{
		.soc_id = "r8a7796", .revision = "ES1.1",
		.data = (void *)SD_SKIP_FIRST,
	},
	{ /* sentinel */ }
};

struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
	const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
	struct clk **clks, void __iomem *base,
	struct raw_notifier_head *notifiers)
{
	const struct clk *parent;
	unsigned int mult = 1;
	unsigned int div = 1;
	u32 value;

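	/*
	 * For the dual-parent clock types (MDSEL, RCKSEL and the E3 RPCSRC)
	 * core->parent packs two parent indices: the default parent in the
	 * low 16 bits and the alternative one in the high 16 bits.  MDSEL
	 * and RCKSEL pack their two dividers into core->div the same way.
	 */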
	parent = clks[core->parent & 0xffff];	/* some types use high bits */
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	switch (core->type) {
	case CLK_TYPE_GEN3_MAIN:
		div = cpg_pll_config->extal_div;
		break;

	case CLK_TYPE_GEN3_PLL0:
		/*
		 * PLL0 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL0CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL1:
		mult = cpg_pll_config->pll1_mult;
		div = cpg_pll_config->pll1_div;
		break;

	case CLK_TYPE_GEN3_PLL2:
		/*
		 * PLL2 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL2CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_PLL3:
		mult = cpg_pll_config->pll3_mult;
		div = cpg_pll_config->pll3_div;
		break;

	case CLK_TYPE_GEN3_PLL4:
		/*
		 * PLL4 is a configurable multiplier clock. Register it as a
		 * fixed factor clock for now as there's no generic multiplier
		 * clock implementation and we currently have no need to change
		 * the multiplier value.
		 */
		value = readl(base + CPG_PLL4CR);
		mult = (((value >> 24) & 0x7f) + 1) * 2;
		if (cpg_quirks & PLL_ERRATA)
			mult *= 2;
		break;

	case CLK_TYPE_GEN3_SD:
		return cpg_sd_clk_register(core->name, base, core->offset,
					   __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN3_R:
		if (cpg_quirks & RCKCR_CKSEL) {
			struct cpg_simple_notifier *csn;

			csn = kzalloc(sizeof(*csn), GFP_KERNEL);
			if (!csn)
				return ERR_PTR(-ENOMEM);

			csn->reg = base + CPG_RCKCR;

			/*
			 * RINT is the default parent; switch to EXTALR only
			 * if it is populated (i.e. has a non-zero rate).
			 */
			value = readl(csn->reg) & 0x3f;

			if (clk_get_rate(clks[cpg_clk_extalr])) {
				parent = clks[cpg_clk_extalr];
				value |= CPG_RCKCR_CKSEL;
			}

			writel(value, csn->reg);
			cpg_simple_notifier_register(notifiers, csn);
			break;
		}

		/* Select parent clock of RCLK by MD28 */
		if (cpg_mode & BIT(28))
			parent = clks[cpg_clk_extalr];
		break;

	case CLK_TYPE_GEN3_MDSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using a mode pin
		 */
		if (cpg_mode & BIT(core->offset)) {
			div = core->div & 0xffff;
		} else {
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		mult = 1;
		break;

	case CLK_TYPE_GEN3_Z:
		return cpg_z_clk_register(core->name, __clk_get_name(parent),
					  base, core->div, core->offset);

	case CLK_TYPE_GEN3_OSC:
		/*
		 * Clock combining OSC EXTAL predivider and a fixed divider
		 */
		div = cpg_pll_config->osc_prediv * core->div;
		break;

	case CLK_TYPE_GEN3_RCKSEL:
		/*
		 * Clock selectable between two parents and two fixed dividers
		 * using RCKCR.CKSEL
		 */
		if (readl(base + CPG_RCKCR) & CPG_RCKCR_CKSEL) {
			div = core->div & 0xffff;
		} else {
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div >> 16;
		}
		break;

	case CLK_TYPE_GEN3_RPCSRC:
		return clk_register_divider_table(NULL, core->name,
						  __clk_get_name(parent), 0,
						  base + CPG_RPCCKCR, 3, 2, 0,
						  cpg_rpcsrc_div_table,
						  &cpg_lock);

	case CLK_TYPE_GEN3_E3_RPCSRC:
		/*
		 * Register RPCSRC as a fixed-factor clock based on the
		 * MD[4:1] pins and the CPG_RPCCKCR[4:3] register value,
		 * both of which have been set before the kernel boots.
		 */
		value = (readl(base + CPG_RPCCKCR) & GENMASK(4, 3)) >> 3;

		switch (value) {
		case 0:
			div = 5;
			break;
		case 1:
			div = 3;
			break;
		case 2:
			parent = clks[core->parent >> 16];
			if (IS_ERR(parent))
				return ERR_CAST(parent);
			div = core->div;
			break;
		case 3:
		default:
			div = 2;
			break;
		}
		break;

	case CLK_TYPE_GEN3_RPC:
		return cpg_rpc_clk_register(core->name, base,
					    __clk_get_name(parent), notifiers);

	case CLK_TYPE_GEN3_RPCD2:
		return cpg_rpcd2_clk_register(core->name, base,
					      __clk_get_name(parent));

	default:
		return ERR_PTR(-EINVAL);
	}

	return clk_register_fixed_factor(NULL, core->name,
					 __clk_get_name(parent), 0, mult, div);
}

int __init rcar_gen3_cpg_init(const struct rcar_gen3_cpg_pll_config *config,
			      unsigned int clk_extalr, u32 mode)
{
	const struct soc_device_attribute *attr;

	cpg_pll_config = config;
	cpg_clk_extalr = clk_extalr;
	cpg_mode = mode;
	attr = soc_device_match(cpg_quirks_match);
	if (attr)
		cpg_quirks = (uintptr_t)attr->data;
	pr_debug("%s: mode = 0x%x quirks = 0x%x\n", __func__, mode, cpg_quirks);

	spin_lock_init(&cpg_lock);

	return 0;
}