xref: /openbmc/linux/drivers/clk/renesas/rzg2l-cpg.c (revision cef69974)
// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzg2l-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

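/*
 * Helpers to decode the packed "conf" word of DIV/MUX core clocks:
 * bits [31:20] hold the register offset (see GET_REG_OFFSET() below),
 * bits [19:12] the bit shift and bits [11:8] the field width.
 */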
#define DIV_RSMASK(v, s, m)	(((v) >> (s)) & (m))
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

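/* Field extractors for the SAM PLL CLK1/CLK2 register values: K, M, P and S. */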
#define KDIV(val)		DIV_RSMASK(val, 16, 0xffff)
#define MDIV(val)		DIV_RSMASK(val, 6, 0x3ff)
#define PDIV(val)		DIV_RSMASK(val, 0, 0x3f)
#define SDIV(val)		DIV_RSMASK(val, 0, 0x7)

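/* Monitor registers live 0x180 above their CLK_ON/CLK_RST control registers. */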
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

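/*
 * GET_REG_OFFSET() extracts the register offset (bits [31:20]) from a DIV/MUX
 * conf word; GET_REG_SAMPLL_CLK1()/GET_REG_SAMPLL_CLK2() extract the two
 * register offsets packed into a SAM PLL conf word.
 */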
#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)

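/**
 * struct sd_hw_data - SD MUX clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: SD MUX configuration (packed register offset, shift and width)
 * @priv: CPG private data
 */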
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)

/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @notifiers: Notifier chain to save/restore clock state for system resume
 * @info: Pointer to platform data
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct raw_notifier_head notifiers;
	const struct rzg2l_cpg_info *info;
};

static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	const char *parent_name;
	struct clk_hw *clk_hw;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if (core->dtable)
		clk_hw = clk_hw_register_divider_table(dev, core->name,
						       parent_name, 0,
						       base + GET_REG_OFFSET(core->conf),
						       GET_SHIFT(core->conf),
						       GET_WIDTH(core->conf),
						       core->flag,
						       core->dtable,
						       &priv->rmw_lock);
	else
		clk_hw = clk_hw_register_divider(dev, core->name,
						 parent_name, 0,
						 base + GET_REG_OFFSET(core->conf),
						 GET_SHIFT(core->conf),
						 GET_WIDTH(core->conf),
						 core->flag, &priv->rmw_lock);

	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}

static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
					       struct clk_rate_request *req)
{
	return clk_mux_determine_rate_flags(hw, req, 0);
}

static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 bitmask;

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
	 * to 2’b10 (400 MHz) or vice versa, switch to 2’b11 (266 MHz) first,
	 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
	 * (400 MHz)).
	 * Writing '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock switching
	 * register is prohibited.
	 * The clock mux has 3 input clocks (533 MHz, 400 MHz, and 266 MHz), and
	 * the index-to-value mapping is done by adding 1 to the index.
	 */
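	/*
	 * The upper halfword written below is the write-enable mask for the
	 * mux field, so only the selected bits are updated.
	 */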
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	if (index != clk_src_266) {
		u32 msk, val;
		int ret;

		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;

		ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
					 !(val & msk), 100,
					 CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(priv->dev, "failed to switch clk source\n");
			return ret;
		}
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	return 0;
}

static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));

	val >>= GET_SHIFT(hwdata->conf);
	val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
	if (val) {
		val--;
	} else {
		/* Prohibited clk source, change it to 533 MHz (reset value) */
		rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
	}

	return val;
}

static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
};

static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
			      void __iomem *base,
			      struct rzg2l_cpg_priv *priv)
{
	struct sd_hw_data *clk_hw_data;
	struct clk_init_data init;
	struct clk_hw *clk_hw;
	int ret;

	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
	if (!clk_hw_data)
		return ERR_PTR(-ENOMEM);

	clk_hw_data->priv = priv;
	clk_hw_data->conf = core->conf;

	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
	init.flags = 0;
	init.num_parents = core->num_parents;
	init.parent_names = core->parent_names;

	clk_hw = &clk_hw_data->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(priv->dev, clk_hw);
	if (ret)
		return ERR_PTR(ret);

	return clk_hw->clk;
}

struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	unsigned int mult = 1;
	unsigned int div = 1;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));
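	/*
	 * rate = parent_rate * (M + K / 65536) / (P * 2^S).
	 * Note: with integer arithmetic the K / 65536 term truncates to zero
	 * for any 16-bit K value, so only M contributes to the multiplier.
	 */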
	mult = MDIV(val1) + KDIV(val1) / 65536;
	div = PDIV(val1) * (1 << SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult, div);
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct clk **clks,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	const struct clk *parent;
	struct clk_init_data init;
	const char *parent_name;
	struct pll_clk *pll_clk;

	parent = clks[core->parent & 0xffff];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = &rzg2l_cpg_pll_ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	return clk_register(NULL, &pll_clk->hw);
}

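/*
 * Clock lookup for the two-cell specifier used in DT: the first cell selects
 * CPG_CORE or CPG_MOD, the second cell is the clock index within that group,
 * e.g. "clocks = <&cpg CPG_MOD 42>;" (hypothetical consumer example).
 */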
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}

/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)

static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	unsigned int i;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

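	/*
	 * The upper 16 bits of CLK_ON are per-bit write-enable bits, so only
	 * this clock's ON bit is updated and the other clocks sharing the
	 * register are left untouched.
	 */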
491 		value = (bitmask << 16) | bitmask;
492 	else
493 		value = bitmask << 16;
494 	writel(value, priv->base + CLK_ON_R(reg));
495 
496 	spin_unlock_irqrestore(&priv->rmw_lock, flags);
497 
498 	if (!enable)
499 		return 0;
500 
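	/* Wait for the clock supply to be reflected in the monitor register. */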
	for (i = 1000; i > 0; --i) {
		if (((readl(priv->base + CLK_MON_R(reg))) & bitmask))
			break;
		cpu_relax();
	}

	if (!i) {
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));
		return -ETIMEDOUT;
	}

	return 0;
}

static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = true;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return 0;
	}

	return rzg2l_mod_clock_endisable(hw, true);
}

static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);

	if (clock->sibling) {
		struct rzg2l_cpg_priv *priv = clock->priv;
		unsigned long flags;
		bool enabled;

		spin_lock_irqsave(&priv->rmw_lock, flags);
		enabled = clock->sibling->enabled;
		clock->enabled = false;
		spin_unlock_irqrestore(&priv->rmw_lock, flags);
		if (enabled)
			return;
	}

	rzg2l_mod_clock_endisable(hw, false);
}

static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
		return 1;
	}

	if (clock->sibling)
		return clock->enabled;

	value = readl(priv->base + CLK_MON_R(clock->off));

	return value & bitmask;
}

static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};

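/*
 * Coupled module clocks share a single CLK_ON/CLK_MON bit; look up the other
 * mstp_clock instance that uses the same register offset and bit.
 */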
static struct mstp_clock
*rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
			      struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}

static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock__get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}

#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)

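/*
 * The reset registers use the same write-enable scheme as CLK_ON: the upper
 * 16 bits select which reset bits are updated; per the assert/deassert
 * handlers below, writing the selected bit as 0 asserts the reset and
 * writing it as 1 releases it.
 */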
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 we = dis << 16;

	dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	/* Reset module */
	writel(we, priv->base + CLK_RST_R(reg));

	/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
	udelay(35);

	/* Release module from reset state */
	writel(we | dis, priv->base + CLK_RST_R(reg));

	return 0;
}

static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 value = BIT(info->resets[id].bit) << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));
	return 0;
}

static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 dis = BIT(info->resets[id].bit);
	u32 value = (dis << 16) | dis;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));
	return 0;
}

static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 bitmask = BIT(info->resets[id].bit);

	return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask);
}

static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};

static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}

static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

static bool rzg2l_cpg_is_pm_clk(const struct of_phandle_args *clkspec)
{
	if (clkspec->args_count != 2)
		return false;

	switch (clkspec->args[0]) {
	case CPG_MOD:
		return true;

	default:
		return false;
	}
}

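/*
 * Clock domain attach callback: add every CPG_MOD clock referenced by the
 * consumer's "clocks" property to its PM clock list, so the module clocks
 * are gated through Runtime PM.
 */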
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(&clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}

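/*
 * Register a single always-on clock PM domain whose attach/detach callbacks
 * above hook consumer devices into the pm_clk framework.
 */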
static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd;
	int ret;

	genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
	if (!genpd)
		return -ENOMEM;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}

static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(dev);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};

static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
MODULE_LICENSE("GPL v2");