xref: /openbmc/linux/drivers/clk/renesas/rzg2l-cpg.c (revision 3dfbe6a73ae80429ccd268749e91c0d8d1526107)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * RZ/G2L Clock Pulse Generator
4  *
5  * Copyright (C) 2021 Renesas Electronics Corp.
6  *
7  * Based on renesas-cpg-mssr.c
8  *
9  * Copyright (C) 2015 Glider bvba
10  * Copyright (C) 2013 Ideas On Board SPRL
11  * Copyright (C) 2015 Renesas Electronics Corp.
12  */
13 
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31 
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33 
34 #include "rzg2l-cpg.h"
35 
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

/*
 * Helpers to decode the packed "conf" word used by the clock tables:
 * bits [31:20] hold the register offset, bits [19:12] the field shift and
 * bits [11:8] the field width.  All macro arguments are fully parenthesized
 * so callers may pass arbitrary expressions without precedence surprises.
 */
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

/* SAM PLL divider fields packed into the CLK1/CLK2 registers. */
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

/* Module clock ON/RESET registers and their monitor counterparts. */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)

/* Write-enable bit used by the self-clearing CPG mux/divider registers. */
#define CPG_WEN_BIT		BIT(16)

/* Maximum supported video clock (VCLK) frequency in Hz. */
#define MAX_VCLK_FREQ		(148500000)
62 
/**
 * struct clk_hw_data - clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @sconf: clock status configuration (register offset, shift, width)
 * @priv: CPG private data structure
 */
struct clk_hw_data {
	struct clk_hw hw;
	u32 conf;
	u32 sconf;
	struct rzg2l_cpg_priv *priv;
};

/* Map an embedded struct clk_hw back to its containing clk_hw_data. */
#define to_clk_hw_data(_hw)	container_of(_hw, struct clk_hw_data, hw)
78 
/**
 * struct sd_mux_hw_data - SD MUX clock hardware data
 * @hw_data: clock hw data
 * @mtable: clock mux table (maps parent index to register field value,
 *          consumed via clk_mux_index_to_val()/clk_mux_val_to_index())
 */
struct sd_mux_hw_data {
	struct clk_hw_data hw_data;
	const u32 *mtable;
};

/* Map a clk_hw_data back to its containing sd_mux_hw_data. */
#define to_sd_mux_hw_data(_hw)	container_of(_hw, struct sd_mux_hw_data, hw_data)
90 
/**
 * struct rzg2l_pll5_param - PLL5 divider parameters
 * @pl5_fracin: fractional part of the feedback divider, 24-bit fixed point
 *              (see rzg2l_cpg_get_foutpostdiv_rate())
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the feedback divider (target rate in MHz)
 * @pl5_postdiv1: post divider 1
 * @pl5_postdiv2: post divider 2
 * @pl5_spread: value written to CPG_SIPLL5_CLK5; presumably the SSCG
 *              spread setting — confirm against the hardware manual
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 mux and DSI divider parameters
 * @clksrc: mux clock source selection; when set, rates are halved
 *          (see rzg2l_cpg_get_vclk_rate())
 * @dsi_div_a: DSI divider A — divides by (1 << dsi_div_a)
 * @dsi_div_b: DSI divider B — divides by (dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
105 
/**
 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
 *
 * @rcdev: Reset controller entity
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @info: Pointer to platform data
 * @genpd: PM domain
 * @mux_dsi_div_params: pll5 mux and dsi div parameters
 */
struct rzg2l_cpg_priv {
	struct reset_controller_dev rcdev;
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	/*
	 * clks[] entries appear to hold ERR_PTR(-ENOENT) until registered
	 * (see the WARN_DEBUG check in rzg2l_cpg_register_core_clk()).
	 */
	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	const struct rzg2l_cpg_info *info;

	struct generic_pm_domain genpd;

	struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
};
140 
/* devm action callback: tear down the OF clock provider for @data. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	struct device_node *np = data;

	of_clk_del_provider(np);
}
145 
146 /* Must be called in atomic context. */
rzg2l_cpg_wait_clk_update_done(void __iomem * base,u32 conf)147 static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
148 {
149 	u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
150 	u32 off = GET_REG_OFFSET(conf);
151 	u32 val;
152 
153 	return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
154 }
155 
/*
 * Pre-rate-change notifier for the SD clock muxes: parks the mux on the
 * 266 MHz source before any other switch, since the hardware forbids a
 * direct 533 MHz <-> 400 MHz transition.
 */
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
				  void *data)
{
	struct clk_notifier_data *cnd = data;
	struct clk_hw *hw = __clk_get_hw(cnd->clk);
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	const u32 clk_src_266 = 3;	/* register value selecting 266 MHz */
	unsigned long flags;
	int ret;

	/* Only act before a change, and only if the target isn't 266 MHz. */
	if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
		return NOTIFY_DONE;

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
	 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
	 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */

	writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch to safe clk source\n");

	return notifier_from_errno(ret);
}
198 
rzg2l_register_notifier(struct clk_hw * hw,const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)199 static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
200 				   struct rzg2l_cpg_priv *priv)
201 {
202 	struct notifier_block *nb;
203 
204 	if (!core->notifier)
205 		return 0;
206 
207 	nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
208 	if (!nb)
209 		return -ENOMEM;
210 
211 	nb->notifier_call = core->notifier;
212 
213 	return clk_notifier_register(hw->clk, nb);
214 }
215 
216 static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk * core,struct clk ** clks,void __iomem * base,struct rzg2l_cpg_priv * priv)217 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
218 			   struct clk **clks,
219 			   void __iomem *base,
220 			   struct rzg2l_cpg_priv *priv)
221 {
222 	struct device *dev = priv->dev;
223 	const struct clk *parent;
224 	const char *parent_name;
225 	struct clk_hw *clk_hw;
226 
227 	parent = clks[core->parent & 0xffff];
228 	if (IS_ERR(parent))
229 		return ERR_CAST(parent);
230 
231 	parent_name = __clk_get_name(parent);
232 
233 	if (core->dtable)
234 		clk_hw = clk_hw_register_divider_table(dev, core->name,
235 						       parent_name, 0,
236 						       base + GET_REG_OFFSET(core->conf),
237 						       GET_SHIFT(core->conf),
238 						       GET_WIDTH(core->conf),
239 						       core->flag,
240 						       core->dtable,
241 						       &priv->rmw_lock);
242 	else
243 		clk_hw = clk_hw_register_divider(dev, core->name,
244 						 parent_name, 0,
245 						 base + GET_REG_OFFSET(core->conf),
246 						 GET_SHIFT(core->conf),
247 						 GET_WIDTH(core->conf),
248 						 core->flag, &priv->rmw_lock);
249 
250 	if (IS_ERR(clk_hw))
251 		return ERR_CAST(clk_hw);
252 
253 	return clk_hw->clk;
254 }
255 
256 static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk * core,void __iomem * base,struct rzg2l_cpg_priv * priv)257 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
258 			   void __iomem *base,
259 			   struct rzg2l_cpg_priv *priv)
260 {
261 	const struct clk_hw *clk_hw;
262 
263 	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
264 					  core->parent_names, core->num_parents,
265 					  core->flag,
266 					  base + GET_REG_OFFSET(core->conf),
267 					  GET_SHIFT(core->conf),
268 					  GET_WIDTH(core->conf),
269 					  core->mux_flags, &priv->rmw_lock);
270 	if (IS_ERR(clk_hw))
271 		return ERR_CAST(clk_hw);
272 
273 	return clk_hw->clk;
274 }
275 
/*
 * Switch the SD mux to the parent at @index and wait for the hardware to
 * acknowledge the switch via the status (sconf) register.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
	u32 off = GET_REG_OFFSET(clk_hw_data->conf);
	u32 shift = GET_SHIFT(clk_hw_data->conf);
	unsigned long flags;
	u32 val;
	int ret;

	/* Translate the parent index into the register field value. */
	val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);

	spin_lock_irqsave(&priv->rmw_lock, flags);

	/* Write-enable bit plus the new selection, shifted into the field. */
	writel((CPG_WEN_BIT | val) << shift, priv->base + off);

	/* Wait for the update done. */
	ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "Failed to switch parent\n");

	return ret;
}
303 
rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw * hw)304 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
305 {
306 	struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
307 	struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
308 	struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
309 	u32 val;
310 
311 	val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
312 	val >>= GET_SHIFT(clk_hw_data->conf);
313 	val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
314 
315 	return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
316 }
317 
318 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
319 	.determine_rate = __clk_mux_determine_rate_closest,
320 	.set_parent	= rzg2l_cpg_sd_clk_mux_set_parent,
321 	.get_parent	= rzg2l_cpg_sd_clk_mux_get_parent,
322 };
323 
324 static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk * core,void __iomem * base,struct rzg2l_cpg_priv * priv)325 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
326 			      void __iomem *base,
327 			      struct rzg2l_cpg_priv *priv)
328 {
329 	struct sd_mux_hw_data *sd_mux_hw_data;
330 	struct clk_init_data init;
331 	struct clk_hw *clk_hw;
332 	int ret;
333 
334 	sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
335 	if (!sd_mux_hw_data)
336 		return ERR_PTR(-ENOMEM);
337 
338 	sd_mux_hw_data->hw_data.priv = priv;
339 	sd_mux_hw_data->hw_data.conf = core->conf;
340 	sd_mux_hw_data->hw_data.sconf = core->sconf;
341 	sd_mux_hw_data->mtable = core->mtable;
342 
343 	init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
344 	init.ops = &rzg2l_cpg_sd_clk_mux_ops;
345 	init.flags = core->flag;
346 	init.num_parents = core->num_parents;
347 	init.parent_names = core->parent_names;
348 
349 	clk_hw = &sd_mux_hw_data->hw_data.hw;
350 	clk_hw->init = &init;
351 
352 	ret = devm_clk_hw_register(priv->dev, clk_hw);
353 	if (ret)
354 		return ERR_PTR(ret);
355 
356 	ret = rzg2l_register_notifier(clk_hw, core, priv);
357 	if (ret) {
358 		dev_err(priv->dev, "Failed to register notifier for %s\n",
359 			core->name);
360 		return ERR_PTR(ret);
361 	}
362 
363 	return clk_hw->clk;
364 }
365 
/*
 * Compute the PLL5 FOUTPOSTDIV rate for a target @rate and fill @params
 * with the corresponding divider settings.
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	/*
	 * Fixed configuration: the integer feedback divider is the target
	 * rate in MHz; the remainder becomes the 24-bit fractional part.
	 */
	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	/* FOUTVCO = EXTAL * (intin + fracin / 2^24) / refdiv */
	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	/* FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2) */
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
387 
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @rate: cached rate set by the last successful set_rate(); 0 until then
 * @priv: CPG private data structure
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

/* Map an embedded struct clk_hw back to its containing dsi_div_hw_data. */
#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
396 
rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)397 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
398 						   unsigned long parent_rate)
399 {
400 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
401 	unsigned long rate = dsi_div->rate;
402 
403 	if (!rate)
404 		rate = parent_rate;
405 
406 	return rate;
407 }
408 
rzg2l_cpg_get_vclk_parent_rate(struct clk_hw * hw,unsigned long rate)409 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
410 						    unsigned long rate)
411 {
412 	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
413 	struct rzg2l_cpg_priv *priv = dsi_div->priv;
414 	struct rzg2l_pll5_param params;
415 	unsigned long parent_rate;
416 
417 	parent_rate = rzg2l_cpg_get_foutpostdiv_rate(&params, rate);
418 
419 	if (priv->mux_dsi_div_params.clksrc)
420 		parent_rate /= 2;
421 
422 	return parent_rate;
423 }
424 
rzg2l_cpg_dsi_div_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)425 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
426 					    struct clk_rate_request *req)
427 {
428 	if (req->rate > MAX_VCLK_FREQ)
429 		req->rate = MAX_VCLK_FREQ;
430 
431 	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);
432 
433 	return 0;
434 }
435 
/*
 * Program the DSI dividers for @rate: cache the rate and write divider A
 * (bits 7:0 region, shifted by 0) and divider B (shifted by 8) together
 * with their write-enable bits.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	/* Reject zero and anything above the VCLK ceiling. */
	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}

static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
468 
469 static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk * core,struct clk ** clks,struct rzg2l_cpg_priv * priv)470 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
471 			       struct clk **clks,
472 			       struct rzg2l_cpg_priv *priv)
473 {
474 	struct dsi_div_hw_data *clk_hw_data;
475 	const struct clk *parent;
476 	const char *parent_name;
477 	struct clk_init_data init;
478 	struct clk_hw *clk_hw;
479 	int ret;
480 
481 	parent = clks[core->parent & 0xffff];
482 	if (IS_ERR(parent))
483 		return ERR_CAST(parent);
484 
485 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
486 	if (!clk_hw_data)
487 		return ERR_PTR(-ENOMEM);
488 
489 	clk_hw_data->priv = priv;
490 
491 	parent_name = __clk_get_name(parent);
492 	init.name = core->name;
493 	init.ops = &rzg2l_cpg_dsi_div_ops;
494 	init.flags = CLK_SET_RATE_PARENT;
495 	init.parent_names = &parent_name;
496 	init.num_parents = 1;
497 
498 	clk_hw = &clk_hw_data->hw;
499 	clk_hw->init = &init;
500 
501 	ret = devm_clk_hw_register(priv->dev, clk_hw);
502 	if (ret)
503 		return ERR_PTR(ret);
504 
505 	return clk_hw->clk;
506 }
507 
/**
 * struct pll5_mux_hw_data - PLL5 mux clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @rate: cached rate (unused by the visible mux ops)
 * @priv: CPG private data structure
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

/* Map an embedded struct clk_hw back to its containing pll5_mux_hw_data. */
#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
516 
rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)517 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
518 						   struct clk_rate_request *req)
519 {
520 	struct clk_hw *parent;
521 	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
522 	struct rzg2l_cpg_priv *priv = hwdata->priv;
523 
524 	parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
525 	req->best_parent_hw = parent;
526 	req->best_parent_rate = req->rate;
527 
528 	return 0;
529 }
530 
/* Write the new mux selection (with its write-enable bit) to OTHERFUNC1. */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
551 
rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw * hw)552 static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
553 {
554 	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
555 	struct rzg2l_cpg_priv *priv = hwdata->priv;
556 
557 	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
558 }
559 
560 static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
561 	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
562 	.set_parent	= rzg2l_cpg_pll5_4_clk_mux_set_parent,
563 	.get_parent	= rzg2l_cpg_pll5_4_clk_mux_get_parent,
564 };
565 
566 static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)567 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
568 				  struct rzg2l_cpg_priv *priv)
569 {
570 	struct pll5_mux_hw_data *clk_hw_data;
571 	struct clk_init_data init;
572 	struct clk_hw *clk_hw;
573 	int ret;
574 
575 	clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
576 	if (!clk_hw_data)
577 		return ERR_PTR(-ENOMEM);
578 
579 	clk_hw_data->priv = priv;
580 	clk_hw_data->conf = core->conf;
581 
582 	init.name = core->name;
583 	init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
584 	init.flags = CLK_SET_RATE_PARENT;
585 	init.num_parents = core->num_parents;
586 	init.parent_names = core->parent_names;
587 
588 	clk_hw = &clk_hw_data->hw;
589 	clk_hw->init = &init;
590 
591 	ret = devm_clk_hw_register(priv->dev, clk_hw);
592 	if (ret)
593 		return ERR_PTR(ret);
594 
595 	return clk_hw->clk;
596 }
597 
/**
 * struct sipll5 - SIPLL5 clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (register offset, shift, width)
 * @foutpostdiv_rate: cached FOUTPOSTDIV rate from the last set_rate();
 *                    0 until the first successful set_rate()
 * @priv: CPG private data structure
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

/* Map an embedded struct clk_hw back to its containing sipll5. */
#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
606 
rzg2l_cpg_get_vclk_rate(struct clk_hw * hw,unsigned long rate)607 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
608 					     unsigned long rate)
609 {
610 	struct sipll5 *sipll5 = to_sipll5(hw);
611 	struct rzg2l_cpg_priv *priv = sipll5->priv;
612 	unsigned long vclk;
613 
614 	vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
615 		       (priv->mux_dsi_div_params.dsi_div_b + 1));
616 
617 	if (priv->mux_dsi_div_params.clksrc)
618 		vclk /= 2;
619 
620 	return vclk;
621 }
622 
rzg2l_cpg_sipll5_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)623 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
624 						  unsigned long parent_rate)
625 {
626 	struct sipll5 *sipll5 = to_sipll5(hw);
627 	unsigned long pll5_rate = sipll5->foutpostdiv_rate;
628 
629 	if (!pll5_rate)
630 		pll5_rate = parent_rate;
631 
632 	return pll5_rate;
633 }
634 
/* Accept any requested rate as-is; set_rate() validates and programs it. */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
641 
rzg2l_cpg_sipll5_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)642 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
643 				     unsigned long rate,
644 				     unsigned long parent_rate)
645 {
646 	struct sipll5 *sipll5 = to_sipll5(hw);
647 	struct rzg2l_cpg_priv *priv = sipll5->priv;
648 	struct rzg2l_pll5_param params;
649 	unsigned long vclk_rate;
650 	int ret;
651 	u32 val;
652 
653 	/*
654 	 *  OSC --> PLL5 --> FOUTPOSTDIV-->|
655 	 *                   |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
656 	 *                   |--FOUT1PH0-->|
657 	 *
658 	 * Based on the dot clock, the DSI divider clock calculates the parent
659 	 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
660 	 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
661 	 *
662 	 * OSC --> PLL5 --> FOUTPOSTDIV
663 	 */
664 
665 	if (!rate)
666 		return -EINVAL;
667 
668 	vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
669 	sipll5->foutpostdiv_rate =
670 		rzg2l_cpg_get_foutpostdiv_rate(&params, vclk_rate);
671 
672 	/* Put PLL5 into standby mode */
673 	writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
674 	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
675 				 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
676 	if (ret) {
677 		dev_err(priv->dev, "failed to release pll5 lock");
678 		return ret;
679 	}
680 
681 	/* Output clock setting 1 */
682 	writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
683 	       (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
684 
685 	/* Output clock setting, SSCG modulation value setting 3 */
686 	writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
687 
688 	/* Output clock setting 4 */
689 	writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
690 	       priv->base + CPG_SIPLL5_CLK4);
691 
692 	/* Output clock setting 5 */
693 	writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
694 
695 	/* PLL normal mode setting */
696 	writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
697 	       CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
698 	       priv->base + CPG_SIPLL5_STBY);
699 
700 	/* PLL normal mode transition, output clock stability check */
701 	ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
702 				 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
703 	if (ret) {
704 		dev_err(priv->dev, "failed to lock pll5");
705 		return ret;
706 	}
707 
708 	return 0;
709 }
710 
711 static const struct clk_ops rzg2l_cpg_sipll5_ops = {
712 	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
713 	.round_rate = rzg2l_cpg_sipll5_round_rate,
714 	.set_rate = rzg2l_cpg_sipll5_set_rate,
715 };
716 
717 static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk * core,struct clk ** clks,struct rzg2l_cpg_priv * priv)718 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
719 			  struct clk **clks,
720 			  struct rzg2l_cpg_priv *priv)
721 {
722 	const struct clk *parent;
723 	struct clk_init_data init;
724 	const char *parent_name;
725 	struct sipll5 *sipll5;
726 	struct clk_hw *clk_hw;
727 	int ret;
728 
729 	parent = clks[core->parent & 0xffff];
730 	if (IS_ERR(parent))
731 		return ERR_CAST(parent);
732 
733 	sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
734 	if (!sipll5)
735 		return ERR_PTR(-ENOMEM);
736 
737 	init.name = core->name;
738 	parent_name = __clk_get_name(parent);
739 	init.ops = &rzg2l_cpg_sipll5_ops;
740 	init.flags = 0;
741 	init.parent_names = &parent_name;
742 	init.num_parents = 1;
743 
744 	sipll5->hw.init = &init;
745 	sipll5->conf = core->conf;
746 	sipll5->priv = priv;
747 
748 	writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
749 	       CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
750 
751 	clk_hw = &sipll5->hw;
752 	clk_hw->init = &init;
753 
754 	ret = devm_clk_hw_register(priv->dev, clk_hw);
755 	if (ret)
756 		return ERR_PTR(ret);
757 
758 	priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
759 	priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
760 	priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
761 
762 	return clk_hw->clk;
763 }
764 
/**
 * struct pll_clk - SAM PLL clock hardware data
 * @hw: clock hw
 * @conf: clock configuration (packed CLK1/CLK2 register offsets)
 * @type: clock type (only CLK_TYPE_SAM_PLL has readable dividers)
 * @base: CPG register block base address (note: recalc_rate reads via
 *        priv->base; this copy appears redundant — kept for compatibility)
 * @priv: CPG private data structure
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

/* Map an embedded struct clk_hw back to its containing pll_clk. */
#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
774 
/*
 * Compute the SAM PLL output rate from the CLK1/CLK2 divider registers:
 *
 *   rate = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV)
 */
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	/* Non-SAM PLL types simply pass the parent rate through. */
	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	/* 64-bit multiply by (MDIV + KDIV/2^16), then shift out 2^(16+SDIV). */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}

static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
798 
799 static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk * core,struct clk ** clks,void __iomem * base,struct rzg2l_cpg_priv * priv)800 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
801 			   struct clk **clks,
802 			   void __iomem *base,
803 			   struct rzg2l_cpg_priv *priv)
804 {
805 	struct device *dev = priv->dev;
806 	const struct clk *parent;
807 	struct clk_init_data init;
808 	const char *parent_name;
809 	struct pll_clk *pll_clk;
810 
811 	parent = clks[core->parent & 0xffff];
812 	if (IS_ERR(parent))
813 		return ERR_CAST(parent);
814 
815 	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
816 	if (!pll_clk)
817 		return ERR_PTR(-ENOMEM);
818 
819 	parent_name = __clk_get_name(parent);
820 	init.name = core->name;
821 	init.ops = &rzg2l_cpg_pll_ops;
822 	init.flags = 0;
823 	init.parent_names = &parent_name;
824 	init.num_parents = 1;
825 
826 	pll_clk->hw.init = &init;
827 	pll_clk->conf = core->conf;
828 	pll_clk->base = base;
829 	pll_clk->priv = priv;
830 	pll_clk->type = core->type;
831 
832 	return clk_register(NULL, &pll_clk->hw);
833 }
834 
835 static struct clk
rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args * clkspec,void * data)836 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
837 			       void *data)
838 {
839 	unsigned int clkidx = clkspec->args[1];
840 	struct rzg2l_cpg_priv *priv = data;
841 	struct device *dev = priv->dev;
842 	const char *type;
843 	struct clk *clk;
844 
845 	switch (clkspec->args[0]) {
846 	case CPG_CORE:
847 		type = "core";
848 		if (clkidx > priv->last_dt_core_clk) {
849 			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
850 			return ERR_PTR(-EINVAL);
851 		}
852 		clk = priv->clks[clkidx];
853 		break;
854 
855 	case CPG_MOD:
856 		type = "module";
857 		if (clkidx >= priv->num_mod_clks) {
858 			dev_err(dev, "Invalid %s clock index %u\n", type,
859 				clkidx);
860 			return ERR_PTR(-EINVAL);
861 		}
862 		clk = priv->clks[priv->num_core_clks + clkidx];
863 		break;
864 
865 	default:
866 		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
867 		return ERR_PTR(-EINVAL);
868 	}
869 
870 	if (IS_ERR(clk))
871 		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
872 			PTR_ERR(clk));
873 	else
874 		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
875 			clkspec->args[0], clkspec->args[1], clk,
876 			clk_get_rate(clk));
877 	return clk;
878 }
879 
/*
 * Register one core clock from the platform tables, dispatching on its
 * type. On success the result is stored in priv->clks[core->id]; on any
 * failure an error is logged and the slot keeps its previous value.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	/* Slots are expected to still hold their -ENOENT placeholder. */
	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input clock, looked up by name in the DT node. */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
953 
/**
 * struct mstp_clock - MSTP gating clock
 *
 * @hw: handle between common and hardware-specific interfaces
 * @off: register offset
 * @bit: ON/MON bit
 * @enabled: soft state of the clock, if it is coupled with another clock
 * @priv: CPG/MSTP private data
 * @sibling: pointer to the other coupled clock
 */
struct mstp_clock {
	struct clk_hw hw;
	u16 off;
	u8 bit;
	bool enabled;
	struct rzg2l_cpg_priv *priv;
	struct mstp_clock *sibling;
};

/* Map an embedded struct clk_hw back to its containing mstp_clock. */
#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
974 
/*
 * Gate or ungate a module clock. The CLK_ON registers use a write-enable
 * scheme: the upper 16 bits select which of the lower 16 bits take effect.
 * When enabling on SoCs with monitor registers, poll CLK_MON until the
 * clock reports running.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	/* Offset 0 marks a clock without an ON/OFF control bit. */
	if (!clock->off) {
		dev_dbg(dev, "%pC does not support ON/OFF\n",  hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;	/* write-enable + set */
	else
		value = bitmask << 16;			/* write-enable + clear */
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	/* Some SoCs have no monitor registers; nothing more to check there. */
	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}
1017 
rzg2l_mod_clock_enable(struct clk_hw * hw)1018 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
1019 {
1020 	struct mstp_clock *clock = to_mod_clock(hw);
1021 
1022 	if (clock->sibling) {
1023 		struct rzg2l_cpg_priv *priv = clock->priv;
1024 		unsigned long flags;
1025 		bool enabled;
1026 
1027 		spin_lock_irqsave(&priv->rmw_lock, flags);
1028 		enabled = clock->sibling->enabled;
1029 		clock->enabled = true;
1030 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1031 		if (enabled)
1032 			return 0;
1033 	}
1034 
1035 	return rzg2l_mod_clock_endisable(hw, true);
1036 }
1037 
rzg2l_mod_clock_disable(struct clk_hw * hw)1038 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
1039 {
1040 	struct mstp_clock *clock = to_mod_clock(hw);
1041 
1042 	if (clock->sibling) {
1043 		struct rzg2l_cpg_priv *priv = clock->priv;
1044 		unsigned long flags;
1045 		bool enabled;
1046 
1047 		spin_lock_irqsave(&priv->rmw_lock, flags);
1048 		enabled = clock->sibling->enabled;
1049 		clock->enabled = false;
1050 		spin_unlock_irqrestore(&priv->rmw_lock, flags);
1051 		if (enabled)
1052 			return;
1053 	}
1054 
1055 	rzg2l_mod_clock_endisable(hw, false);
1056 }
1057 
rzg2l_mod_clock_is_enabled(struct clk_hw * hw)1058 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
1059 {
1060 	struct mstp_clock *clock = to_mod_clock(hw);
1061 	struct rzg2l_cpg_priv *priv = clock->priv;
1062 	u32 bitmask = BIT(clock->bit);
1063 	u32 value;
1064 
1065 	if (!clock->off) {
1066 		dev_dbg(priv->dev, "%pC does not support ON/OFF\n",  hw->clk);
1067 		return 1;
1068 	}
1069 
1070 	if (clock->sibling)
1071 		return clock->enabled;
1072 
1073 	if (priv->info->has_clk_mon_regs)
1074 		value = readl(priv->base + CLK_MON_R(clock->off));
1075 	else
1076 		value = readl(priv->base + clock->off);
1077 
1078 	return value & bitmask;
1079 }
1080 
/* Module (MSTP) clocks are pure gates; no rate ops, rate follows parent */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
1086 
1087 static struct mstp_clock
rzg2l_mod_clock_get_sibling(struct mstp_clock * clock,struct rzg2l_cpg_priv * priv)1088 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1089 			     struct rzg2l_cpg_priv *priv)
1090 {
1091 	struct clk_hw *hw;
1092 	unsigned int i;
1093 
1094 	for (i = 0; i < priv->num_mod_clks; i++) {
1095 		struct mstp_clock *clk;
1096 
1097 		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1098 			continue;
1099 
1100 		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1101 		clk = to_mod_clock(hw);
1102 		if (clock->off == clk->off && clock->bit == clk->bit)
1103 			return clk;
1104 	}
1105 
1106 	return NULL;
1107 }
1108 
/*
 * rzg2l_cpg_register_mod_clk() - register one module (MSTP) clock from the
 * SoC description and store it in priv->clks[mod->id].  Logs an error (but
 * does not propagate it) on failure, matching the core-clock path.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	/* Sanity-check the static SoC tables (DEBUG builds only) */
	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Critical clocks must never be gated, even with no consumers */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	/* init lives on the stack; clk_register() copies what it needs */
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	/*
	 * Coupled clocks share a single ON/MON bit: link the two so the
	 * enable/disable ops only touch the gate when both soft states agree.
	 */
	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
1188 
/* Map a reset_controller_dev back to its enclosing CPG private data */
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
1190 
/*
 * rzg2l_cpg_assert() - reset_control_ops .assert
 *
 * CLK_RST registers use a write-enable scheme: the upper half-word bit
 * gates the write to the matching lower half-word bit, so a single
 * writel() with the data bit cleared asserts the line.  Completion is
 * confirmed via the CLK_MRST monitor register, a dedicated CPG_RST_MON
 * bit, or - when neither exists - by waiting one RCLK cycle.
 */
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	/* Pick the monitor source; reg/mask are reused for the poll below */
	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	/* Monitor bit set means the reset is observed as asserted */
	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}
1219 
/*
 * rzg2l_cpg_deassert() - reset_control_ops .deassert
 *
 * Counterpart of rzg2l_cpg_assert(): writes the data bit set (plus its
 * write-enable bit) to release the reset, then waits for the monitor bit
 * to clear, or for one RCLK cycle when no monitor exists.
 */
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	/* Pick the monitor source; reg/mask are reused for the poll below */
	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	/* Monitor bit cleared means the reset is observed as released */
	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}
1249 
/*
 * rzg2l_cpg_reset() - reset_control_ops .reset: pulse the line by
 * asserting and then deasserting it, propagating the first failure.
 */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret = rzg2l_cpg_assert(rcdev, id);

	return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
1261 
/*
 * rzg2l_cpg_status() - reset_control_ops .status
 *
 * Return: 1 if the line is observed asserted, 0 if deasserted, or
 * -ENOTSUPP when the SoC has neither CLK_MRST registers nor a
 * CPG_RST_MON bit for this reset (callers check this exact value, so it
 * must not be changed to -EOPNOTSUPP here).
 */
static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}
1283 
/* Reset controller operations backed by the CLK_RST/CLK_MRST registers */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
1290 
rzg2l_cpg_reset_xlate(struct reset_controller_dev * rcdev,const struct of_phandle_args * reset_spec)1291 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1292 				 const struct of_phandle_args *reset_spec)
1293 {
1294 	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1295 	const struct rzg2l_cpg_info *info = priv->info;
1296 	unsigned int id = reset_spec->args[0];
1297 
1298 	if (id >= rcdev->nr_resets || !info->resets[id].off) {
1299 		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1300 		return -EINVAL;
1301 	}
1302 
1303 	return id;
1304 }
1305 
/*
 * rzg2l_cpg_reset_controller_register() - expose the CPG as a reset
 * controller; registration lifetime is tied to the device via devm.
 */
static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
1317 
rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv * priv,const struct of_phandle_args * clkspec)1318 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1319 				const struct of_phandle_args *clkspec)
1320 {
1321 	const struct rzg2l_cpg_info *info = priv->info;
1322 	unsigned int id;
1323 	unsigned int i;
1324 
1325 	if (clkspec->args_count != 2)
1326 		return false;
1327 
1328 	if (clkspec->args[0] != CPG_MOD)
1329 		return false;
1330 
1331 	id = clkspec->args[1] + info->num_total_core_clks;
1332 	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1333 		if (info->no_pm_mod_clks[i] == id)
1334 			return false;
1335 	}
1336 
1337 	return true;
1338 }
1339 
/*
 * rzg2l_cpg_attach_dev() - genpd attach_dev hook
 *
 * Walks every "clocks" phandle of @dev's DT node and adds each
 * PM-managed CPG module clock to the device's pm_clk list so runtime PM
 * can gate it.  The pm_clk list itself is created lazily, on the first
 * qualifying clock.
 *
 * Returns 0 on success or a negative error code; on failure the partially
 * built pm_clk list is destroyed.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				/* Create the pm_clk list on first use only */
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			/* of_parse_phandle_with_args took a node reference */
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			/* pm_clk now owns the clk reference on success */
			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1390 
/*
 * rzg2l_cpg_detach_dev() - genpd detach_dev hook: tear down the pm_clk
 * list if rzg2l_cpg_attach_dev() created one for this device.
 */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}
1396 
/* devm action: undo pm_genpd_init() when the CPG device goes away */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
1401 
/*
 * rzg2l_cpg_add_clk_domain() - register the CPG as an always-on PM
 * domain whose attach/detach hooks manage consumer module clocks via
 * the pm_clk framework.
 *
 * Returns 0 on success or a negative error code.
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd = &priv->genpd;
	int ret;

	genpd->name = np->name;
	/* Domain never powers off; it only gates consumer clocks */
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	/* Ensure pm_genpd_remove() runs on driver teardown */
	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}
1424 
/*
 * rzg2l_cpg_probe() - map the CPG block and register, in order: all core
 * and module clocks described by the matched SoC's rzg2l_cpg_info, the
 * clock provider, the PM clock domain, and the reset controller.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot "not registered"; lookups on holes then fail */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	/* Unregister the provider automatically on driver teardown */
	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1488 
/* One entry per SoC whose clock support is enabled in Kconfig */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1516 
/*
 * No .probe/.remove here: the driver is bound once via
 * platform_driver_probe(), which also suppresses sysfs bind/unbind.
 */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1523 
/* Register at subsys time: most on-SoC devices need CPG clocks/resets */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");
1532