// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define DIV_RSMASK(v, s, m)	(((v) >> (s)) & (m))
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

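/*
 * Field extraction helpers for the SAMPLL CLK1/CLK2 registers used by
 * rzg2l_cpg_pll_clk_recalc_rate(): K (fractional part of the multiplier),
 * M (integer multiplier) and P (divider) live in CLK1, S (divider exponent)
 * in CLK2.
 */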
#define KDIV(val)		DIV_RSMASK(val, 16, 0xffff)
#define MDIV(val)		DIV_RSMASK(val, 6, 0x3ff)
#define PDIV(val)		DIV_RSMASK(val, 0, 0x3f)
#define SDIV(val)		DIV_RSMASK(val, 0, 0x7)

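/*
 * Clock ON/OFF and reset control registers; the corresponding monitor
 * registers are located 0x180 above their control counterparts.
 */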
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)

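/**
 * struct sd_hw_data - SD clock source selector (mux) clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: mux configuration (register offset, shift and width)
 * @priv: CPG private data
 */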
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @info: Pointer to platform data
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct raw_notifier_head notifiers;
	const struct rzg2l_cpg_info *info;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

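/*
 * Register a core divider clock.  A divider table is used when the core
 * clock provides one (core->dtable); otherwise a plain register divider is
 * registered.  Register accesses are serialized by the shared rmw_lock.
 */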
static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

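/* Register a simple register-backed mux for a core clock. */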
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

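/*
 * SD0/SD1 clock source selector.  The selector cannot switch directly
 * between the 533 MHz and 400 MHz sources; the set_parent callback below
 * routes such changes through the 266 MHz setting first.
 */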
static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 bitmask;

	/*
	 * As per the HW manual, we must not switch directly between 533 MHz
	 * and 400 MHz. To change the setting from 2'b01 (533 MHz) to 2'b10
	 * (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first, and then
	 * switch to the target setting (2'b01 (533 MHz) or 2'b10 (400 MHz)).
	 * Writing '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock switching
	 * register is prohibited.
	 * The clock mux has three input clocks (533 MHz, 400 MHz, and
	 * 266 MHz); the register value is the parent index plus one.
	 */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	if (index != clk_src_266) {
		u32 msk, val;
		int ret;

		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;

		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
					 !(val & msk), 100,
					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(priv->dev, "failed to switch clk source\n");
			return ret;
		}
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	return 0;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

	val >>= GET_SHIFT(hwdata->conf);
	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
	if (val) {
		val--;
	} else {
		/* Prohibited clock source, switch to 533 MHz (reset value) */
		rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
	}

	return val;
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      void __iomem *base,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = 0;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

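/**
 * struct pll_clk - PLL clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: PLL register configuration
 * @type: CPG clock type
 * @base: CPG register block base address
 * @priv: CPG private data
 */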
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

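/*
 * The SAM PLL output is derived from the CLK1/CLK2 register fields as
 * rate = parent_rate * (M + K / 65536) / (P * 2^S).  Note that the integer
 * division below truncates the fractional K / 65536 contribution.
 */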
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	unsigned int mult = 1;
	unsigned int div = 1;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
	mult = MDIV(val1) + KDIV(val1) / 65536;
	div = PDIV(val1) * (1 << SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_pll_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
}

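/*
 * Translate a two-cell clock specifier (CPG_CORE or CPG_MOD, index) from DT
 * into the corresponding struct clk looked up in priv->clks[].
 */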
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

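/*
 * Register one core clock according to its type.  On failure the clks[]
 * slot keeps its error pointer and the failure is logged.
 */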
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

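/*
 * The upper 16 bits of a CLK_ON register are write-enable bits for the
 * lower 16 bits, so a single bit can be updated without a read-modify-write.
 * When enabling, poll the corresponding CLK_MON register until the clock is
 * reported as running.
 */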
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	unsigned int i;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	for (i = 1000; i > 0; --i) {
		if (readl(priv->base + CLK_MON_R(reg)) & bitmask)
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));
		return -ETIMEDOUT;
	}

	return 0;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	value = readl(priv->base + CLK_MON_R(clock->off));

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

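/*
 * Find the other module clock that shares the same ON/MON register bit;
 * coupled clocks drive a single hardware gate and are kept in sync through
 * their soft "enabled" state.
 */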
static struct mstp_clock
*rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
			      struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock__get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

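/*
 * Pulse a module reset: assert it through the write-enable pattern, wait at
 * least one RCLK cycle, then release it.  As with the CLK_ON registers, the
 * upper 16 bits of a reset register are write-enable bits for the lower 16.
 */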
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 we = dis << 16;

	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	/* Reset module */
	writel(we, priv->base + CLK_RST_R(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(we | dis, priv->base + CLK_RST_R(reg));

	return 0;
}

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 value = BIT(info->resets[id].bit) << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));
	return 0;
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 value = (dis << 16) | dis;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));
	return 0;
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 bitmask = BIT(info->resets[id].bit);

	/*
	 * The reset monitor bit reads 1 while the reset signal is applied,
	 * and reset_control_status() expects a positive value for an
	 * asserted line, so return the bit value directly.
	 */
	return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

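/* Only module (CPG_MOD) clocks are managed as PM clocks for Runtime PM. */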
static bool rzg2l_cpg_is_pm_clk(const struct of_phandle_args *clkspec)
{
	if (clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(&clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

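/*
 * Register an always-on clock power domain whose attach/detach callbacks
 * hand the consumer's CPG_MOD clocks over to the PM clock framework.
 */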
static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;

	genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
	if (!genpd)
		return -ENOMEM;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	pm_genpd_init(genpd, &pm_domain_always_on_gov, false);

	of_genpd_add_provider_simple(np, genpd);
	return 0;
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(dev);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");