1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * RZ/G2L Clock Pulse Generator
4 *
5 * Copyright (C) 2021 Renesas Electronics Corp.
6 *
7 * Based on renesas-cpg-mssr.c
8 *
9 * Copyright (C) 2015 Glider bvba
10 * Copyright (C) 2013 Ideas On Board SPRL
11 * Copyright (C) 2015 Renesas Electronics Corp.
12 */
13
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
24 #include <linux/of.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
31
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
33
34 #include "rzg2l-cpg.h"
35
36 #ifdef DEBUG
37 #define WARN_DEBUG(x) WARN_ON(x)
38 #else
39 #define WARN_DEBUG(x) do { } while (0)
40 #endif
41
/*
 * Helpers to decode a core clock's "conf" word:
 * bits [31:20] hold the register offset, [19:12] the field shift and
 * [11:8] the field width.  All arguments are fully parenthesized so the
 * macros stay safe when called with compound expressions.
 */
#define GET_SHIFT(val)		(((val) >> 12) & 0xff)
#define GET_WIDTH(val)		(((val) >> 8) & 0xf)

/* SAM PLL divider fields packed into the CLK1/CLK2 register values */
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

/* Monitor/status registers live 0x180 above their control registers */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		(((val) >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	(((val) >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	(((val) >> 12) & 0xfff)

/* Upper limit for the DSI video clock (Hz) */
#define MAX_VCLK_FREQ		(148500000)
60
/**
 * struct sd_hw_data - SD clock mux hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: configuration word (register offset/shift/width encoding)
 * @priv: CPG private data
 */
struct sd_hw_data {
	struct clk_hw hw;
	u32 conf;
	struct rzg2l_cpg_priv *priv;
};

#define to_sd_hw_data(_hw)	container_of(_hw, struct sd_hw_data, hw)
68
/**
 * struct rzg2l_pll5_param - PLL5 (SIPLL5) divider and SSCG parameters
 *
 * @pl5_fracin: 24-bit fractional part of the multiplication ratio
 * @pl5_refdiv: reference clock divider
 * @pl5_intin: integer part of the multiplication ratio (MHz)
 * @pl5_postdiv1: first post divider
 * @pl5_postdiv2: second post divider
 * @pl5_spread: spread-spectrum (SSCG) modulation setting
 */
struct rzg2l_pll5_param {
	u32 pl5_fracin;
	u8 pl5_refdiv;
	u8 pl5_intin;
	u8 pl5_postdiv1;
	u8 pl5_postdiv2;
	u8 pl5_spread;
};

/**
 * struct rzg2l_pll5_mux_dsi_div_param - PLL5 mux and DSI divider settings
 *
 * @clksrc: mux source select (non-zero selects the half-rate FOUT1PH0 path)
 * @dsi_div_a: divider A exponent, divides by 2^dsi_div_a
 * @dsi_div_b: divider B, divides by (dsi_div_b + 1)
 */
struct rzg2l_pll5_mux_dsi_div_param {
	u8 clksrc;
	u8 dsi_div_a;
	u8 dsi_div_b;
};
83
84 /**
85 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
86 *
87 * @rcdev: Reset controller entity
88 * @dev: CPG device
89 * @base: CPG register block base address
90 * @rmw_lock: protects register accesses
91 * @clks: Array containing all Core and Module Clocks
92 * @num_core_clks: Number of Core Clocks in clks[]
93 * @num_mod_clks: Number of Module Clocks in clks[]
94 * @num_resets: Number of Module Resets in info->resets[]
95 * @last_dt_core_clk: ID of the last Core Clock exported to DT
96 * @info: Pointer to platform data
97 * @genpd: PM domain
98 * @mux_dsi_div_params: pll5 mux and dsi div parameters
99 */
100 struct rzg2l_cpg_priv {
101 struct reset_controller_dev rcdev;
102 struct device *dev;
103 void __iomem *base;
104 spinlock_t rmw_lock;
105
106 struct clk **clks;
107 unsigned int num_core_clks;
108 unsigned int num_mod_clks;
109 unsigned int num_resets;
110 unsigned int last_dt_core_clk;
111
112 const struct rzg2l_cpg_info *info;
113
114 struct generic_pm_domain genpd;
115
116 struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params;
117 };
118
/*
 * Cleanup callback: @data is handed straight to of_clk_del_provider(),
 * removing the OF clock provider registered for this CPG instance.
 */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
123
124 static struct clk * __init
rzg2l_cpg_div_clk_register(const struct cpg_core_clk * core,struct clk ** clks,void __iomem * base,struct rzg2l_cpg_priv * priv)125 rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
126 struct clk **clks,
127 void __iomem *base,
128 struct rzg2l_cpg_priv *priv)
129 {
130 struct device *dev = priv->dev;
131 const struct clk *parent;
132 const char *parent_name;
133 struct clk_hw *clk_hw;
134
135 parent = clks[core->parent & 0xffff];
136 if (IS_ERR(parent))
137 return ERR_CAST(parent);
138
139 parent_name = __clk_get_name(parent);
140
141 if (core->dtable)
142 clk_hw = clk_hw_register_divider_table(dev, core->name,
143 parent_name, 0,
144 base + GET_REG_OFFSET(core->conf),
145 GET_SHIFT(core->conf),
146 GET_WIDTH(core->conf),
147 core->flag,
148 core->dtable,
149 &priv->rmw_lock);
150 else
151 clk_hw = clk_hw_register_divider(dev, core->name,
152 parent_name, 0,
153 base + GET_REG_OFFSET(core->conf),
154 GET_SHIFT(core->conf),
155 GET_WIDTH(core->conf),
156 core->flag, &priv->rmw_lock);
157
158 if (IS_ERR(clk_hw))
159 return ERR_CAST(clk_hw);
160
161 return clk_hw->clk;
162 }
163
/*
 * Register a plain register-backed mux clock for @core via the common
 * clock framework helper; offset/shift/width are decoded from core->conf.
 */
static struct clk * __init
rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
			   void __iomem *base,
			   struct rzg2l_cpg_priv *priv)
{
	const struct clk_hw *clk_hw;

	clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
					  core->parent_names, core->num_parents,
					  core->flag,
					  base + GET_REG_OFFSET(core->conf),
					  GET_SHIFT(core->conf),
					  GET_WIDTH(core->conf),
					  core->mux_flags, &priv->rmw_lock);
	if (IS_ERR(clk_hw))
		return ERR_CAST(clk_hw);

	return clk_hw->clk;
}
183
/*
 * Switch the SDHI clock mux to parent @index, honouring the mandated
 * intermediate step through the 266 MHz setting, and wait for the
 * switch-status bit in CLKSTATUS to clear after each write.
 */
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct sd_hw_data *hwdata = to_sd_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;
	u32 off = GET_REG_OFFSET(hwdata->conf);
	u32 shift = GET_SHIFT(hwdata->conf);
	const u32 clk_src_266 = 2;
	u32 msk, val, bitmask;
	unsigned long flags;
	int ret;

	/*
	 * As per the HW manual, we should not directly switch from 533 MHz to
	 * 400 MHz and vice versa. To change the setting from 2'b01 (533 MHz)
	 * to 2'b10 (400 MHz) or vice versa, switch to 2'b11 (266 MHz) first,
	 * and then switch to the target setting (2'b01 (533 MHz) or 2'b10
	 * (400 MHz)).
	 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
	 * switching register is prohibited.
	 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
	 * the index to value mapping is done by adding 1 to the index.
	 */
	/* Upper halfword appears to be a write-enable for the mux field — same
	 * WEN scheme used elsewhere in this CPG (confirm against HW manual). */
	bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
	msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
	spin_lock_irqsave(&priv->rmw_lock, flags);
	if (index != clk_src_266) {
		/* Intermediate hop through the 266 MHz setting */
		writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);

		ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
						!(val & msk), 10,
						CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
		if (ret)
			goto unlock;
	}

	writel(bitmask | ((index + 1) << shift), priv->base + off);

	ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
					!(val & msk), 10,
					CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
unlock:
	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (ret)
		dev_err(priv->dev, "failed to switch clk source\n");

	return ret;
}
232
rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw * hw)233 static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
234 {
235 struct sd_hw_data *hwdata = to_sd_hw_data(hw);
236 struct rzg2l_cpg_priv *priv = hwdata->priv;
237 u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
238
239 val >>= GET_SHIFT(hwdata->conf);
240 val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
241
242 return val ? val - 1 : 0;
243 }
244
/* SDHI mux: parent switching goes through the mandated 266 MHz hop */
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
};
250
251 static struct clk * __init
rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk * core,void __iomem * base,struct rzg2l_cpg_priv * priv)252 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
253 void __iomem *base,
254 struct rzg2l_cpg_priv *priv)
255 {
256 struct sd_hw_data *clk_hw_data;
257 struct clk_init_data init;
258 struct clk_hw *clk_hw;
259 int ret;
260
261 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
262 if (!clk_hw_data)
263 return ERR_PTR(-ENOMEM);
264
265 clk_hw_data->priv = priv;
266 clk_hw_data->conf = core->conf;
267
268 init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
269 init.ops = &rzg2l_cpg_sd_clk_mux_ops;
270 init.flags = 0;
271 init.num_parents = core->num_parents;
272 init.parent_names = core->parent_names;
273
274 clk_hw = &clk_hw_data->hw;
275 clk_hw->init = &init;
276
277 ret = devm_clk_hw_register(priv->dev, clk_hw);
278 if (ret)
279 return ERR_PTR(ret);
280
281 return clk_hw->clk;
282 }
283
/*
 * Compute the FOUTPOSTDIV rate needed for @rate and fill in @params
 * with the register values to program.
 *
 * REFDIV, both post dividers and the SSCG spread are fixed; only the
 * integer (MHz) and 24-bit binary-fraction multipliers are derived
 * from @rate.
 * NOTE(review): pl5_intin is u8, so this assumes rate < 256 MHz (callers
 * clamp VCLK to MAX_VCLK_FREQ) — confirm before reusing elsewhere.
 */
static unsigned long
rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params,
			       unsigned long rate)
{
	unsigned long foutpostdiv_rate, foutvco_rate;

	params->pl5_intin = rate / MEGA;
	params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA);
	params->pl5_refdiv = 2;
	params->pl5_postdiv1 = 1;
	params->pl5_postdiv2 = 1;
	params->pl5_spread = 0x16;

	/* FOUTVCO = EXTAL * (intin + fracin / 2^24) / refdiv */
	foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA,
					   (params->pl5_intin << 24) + params->pl5_fracin),
			       params->pl5_refdiv) >> 24;
	/* FOUTPOSTDIV = FOUTVCO / (postdiv1 * postdiv2) */
	foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate,
						 params->pl5_postdiv1 * params->pl5_postdiv2);

	return foutpostdiv_rate;
}
305
/**
 * struct dsi_div_hw_data - DSI divider clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: configuration word
 * @rate: VCLK rate cached by the last successful set_rate()
 * @priv: CPG private data
 */
struct dsi_div_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_dsi_div_hw_data(_hw)	container_of(_hw, struct dsi_div_hw_data, hw)
314
rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)315 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw *hw,
316 unsigned long parent_rate)
317 {
318 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
319 unsigned long rate = dsi_div->rate;
320
321 if (!rate)
322 rate = parent_rate;
323
324 return rate;
325 }
326
rzg2l_cpg_get_vclk_parent_rate(struct clk_hw * hw,unsigned long rate)327 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw *hw,
328 unsigned long rate)
329 {
330 struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
331 struct rzg2l_cpg_priv *priv = dsi_div->priv;
332 struct rzg2l_pll5_param params;
333 unsigned long parent_rate;
334
335 parent_rate = rzg2l_cpg_get_foutpostdiv_rate(¶ms, rate);
336
337 if (priv->mux_dsi_div_params.clksrc)
338 parent_rate /= 2;
339
340 return parent_rate;
341 }
342
/*
 * Clamp the requested VCLK rate to MAX_VCLK_FREQ and derive the parent
 * rate needed to produce it; the parent request is then propagated up.
 */
static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw *hw,
					    struct clk_rate_request *req)
{
	if (req->rate > MAX_VCLK_FREQ)
		req->rate = MAX_VCLK_FREQ;

	req->best_parent_rate = rzg2l_cpg_get_vclk_parent_rate(hw, req->rate);

	return 0;
}
353
/*
 * Program the DSI A/B dividers from the cached mux/div parameters and
 * remember @rate for recalc_rate().  Returns -EINVAL for a zero or
 * out-of-range rate.
 */
static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw *hw,
				      unsigned long rate,
				      unsigned long parent_rate)
{
	struct dsi_div_hw_data *dsi_div = to_dsi_div_hw_data(hw);
	struct rzg2l_cpg_priv *priv = dsi_div->priv;

	/*
	 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
	 *
	 * Based on the dot clock, the DSI divider clock sets the divider value,
	 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
	 * source for the MUX and propagates that info to the parents.
	 */

	if (!rate || rate > MAX_VCLK_FREQ)
		return -EINVAL;

	dsi_div->rate = rate;
	/* The *_WEN bits gate which divider fields the write updates */
	writel(CPG_PL5_SDIV_DIV_DSI_A_WEN | CPG_PL5_SDIV_DIV_DSI_B_WEN |
	       (priv->mux_dsi_div_params.dsi_div_a << 0) |
	       (priv->mux_dsi_div_params.dsi_div_b << 8),
	       priv->base + CPG_PL5_SDIV);

	return 0;
}
380
/* VCLK divider: rate cached in set_rate(), reported by recalc_rate() */
static const struct clk_ops rzg2l_cpg_dsi_div_ops = {
	.recalc_rate = rzg2l_cpg_dsi_div_recalc_rate,
	.determine_rate = rzg2l_cpg_dsi_div_determine_rate,
	.set_rate = rzg2l_cpg_dsi_div_set_rate,
};
386
387 static struct clk * __init
rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk * core,struct clk ** clks,struct rzg2l_cpg_priv * priv)388 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk *core,
389 struct clk **clks,
390 struct rzg2l_cpg_priv *priv)
391 {
392 struct dsi_div_hw_data *clk_hw_data;
393 const struct clk *parent;
394 const char *parent_name;
395 struct clk_init_data init;
396 struct clk_hw *clk_hw;
397 int ret;
398
399 parent = clks[core->parent & 0xffff];
400 if (IS_ERR(parent))
401 return ERR_CAST(parent);
402
403 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
404 if (!clk_hw_data)
405 return ERR_PTR(-ENOMEM);
406
407 clk_hw_data->priv = priv;
408
409 parent_name = __clk_get_name(parent);
410 init.name = core->name;
411 init.ops = &rzg2l_cpg_dsi_div_ops;
412 init.flags = CLK_SET_RATE_PARENT;
413 init.parent_names = &parent_name;
414 init.num_parents = 1;
415
416 clk_hw = &clk_hw_data->hw;
417 clk_hw->init = &init;
418
419 ret = devm_clk_hw_register(priv->dev, clk_hw);
420 if (ret)
421 return ERR_PTR(ret);
422
423 return clk_hw->clk;
424 }
425
/**
 * struct pll5_mux_hw_data - PLL5 4-mux clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: configuration word (register offset encoding)
 * @rate: cached rate
 * @priv: CPG private data
 */
struct pll5_mux_hw_data {
	struct clk_hw hw;
	u32 conf;
	unsigned long rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll5_mux_hw_data(_hw)	container_of(_hw, struct pll5_mux_hw_data, hw)
434
rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)435 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw *hw,
436 struct clk_rate_request *req)
437 {
438 struct clk_hw *parent;
439 struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
440 struct rzg2l_cpg_priv *priv = hwdata->priv;
441
442 parent = clk_hw_get_parent_by_index(hw, priv->mux_dsi_div_params.clksrc);
443 req->best_parent_hw = parent;
444 req->best_parent_rate = req->rate;
445
446 return 0;
447 }
448
/*
 * Select the PLL5 mux source (@index) via OTHERFUNC1; the WEN bit gates
 * the update so only the selector field is written.
 */
static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	/*
	 * FOUTPOSTDIV--->|
	 *  |             | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
	 *  |--FOUT1PH0-->|
	 *
	 * Based on the dot clock, the DSI divider clock calculates the parent
	 * rate and clk source for the MUX. It propagates that info to
	 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
	 */

	writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN | index,
	       priv->base + CPG_OTHERFUNC1_REG);

	return 0;
}
469
/*
 * Read back the mux selector.  NOTE(review): the register value is
 * returned unmasked — this relies on the unrelated OTHERFUNC1 bits
 * reading as zero; confirm against the register layout.
 */
static u8 rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw *hw)
{
	struct pll5_mux_hw_data *hwdata = to_pll5_mux_hw_data(hw);
	struct rzg2l_cpg_priv *priv = hwdata->priv;

	return readl(priv->base + GET_REG_OFFSET(hwdata->conf));
}
477
/* PLL5 source mux: choice is dictated by the DSI divider parameters */
static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops = {
	.determine_rate = rzg2l_cpg_pll5_4_clk_mux_determine_rate,
	.set_parent = rzg2l_cpg_pll5_4_clk_mux_set_parent,
	.get_parent = rzg2l_cpg_pll5_4_clk_mux_get_parent,
};
483
484 static struct clk * __init
rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk * core,struct rzg2l_cpg_priv * priv)485 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk *core,
486 struct rzg2l_cpg_priv *priv)
487 {
488 struct pll5_mux_hw_data *clk_hw_data;
489 struct clk_init_data init;
490 struct clk_hw *clk_hw;
491 int ret;
492
493 clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
494 if (!clk_hw_data)
495 return ERR_PTR(-ENOMEM);
496
497 clk_hw_data->priv = priv;
498 clk_hw_data->conf = core->conf;
499
500 init.name = core->name;
501 init.ops = &rzg2l_cpg_pll5_4_clk_mux_ops;
502 init.flags = CLK_SET_RATE_PARENT;
503 init.num_parents = core->num_parents;
504 init.parent_names = core->parent_names;
505
506 clk_hw = &clk_hw_data->hw;
507 clk_hw->init = &init;
508
509 ret = devm_clk_hw_register(priv->dev, clk_hw);
510 if (ret)
511 return ERR_PTR(ret);
512
513 return clk_hw->clk;
514 }
515
/**
 * struct sipll5 - SIPLL5 (PLL5) clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: configuration word
 * @foutpostdiv_rate: FOUTPOSTDIV rate cached by the last set_rate()
 * @priv: CPG private data
 */
struct sipll5 {
	struct clk_hw hw;
	u32 conf;
	unsigned long foutpostdiv_rate;
	struct rzg2l_cpg_priv *priv;
};

#define to_sipll5(_hw)	container_of(_hw, struct sipll5, hw)
524
rzg2l_cpg_get_vclk_rate(struct clk_hw * hw,unsigned long rate)525 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw *hw,
526 unsigned long rate)
527 {
528 struct sipll5 *sipll5 = to_sipll5(hw);
529 struct rzg2l_cpg_priv *priv = sipll5->priv;
530 unsigned long vclk;
531
532 vclk = rate / ((1 << priv->mux_dsi_div_params.dsi_div_a) *
533 (priv->mux_dsi_div_params.dsi_div_b + 1));
534
535 if (priv->mux_dsi_div_params.clksrc)
536 vclk /= 2;
537
538 return vclk;
539 }
540
rzg2l_cpg_sipll5_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)541 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
542 unsigned long parent_rate)
543 {
544 struct sipll5 *sipll5 = to_sipll5(hw);
545 unsigned long pll5_rate = sipll5->foutpostdiv_rate;
546
547 if (!pll5_rate)
548 pll5_rate = parent_rate;
549
550 return pll5_rate;
551 }
552
/*
 * Accept any requested rate unchanged; achievability is resolved in
 * set_rate() when the PLL parameters are actually computed.
 */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
559
rzg2l_cpg_sipll5_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)560 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
561 unsigned long rate,
562 unsigned long parent_rate)
563 {
564 struct sipll5 *sipll5 = to_sipll5(hw);
565 struct rzg2l_cpg_priv *priv = sipll5->priv;
566 struct rzg2l_pll5_param params;
567 unsigned long vclk_rate;
568 int ret;
569 u32 val;
570
571 /*
572 * OSC --> PLL5 --> FOUTPOSTDIV-->|
573 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
574 * |--FOUT1PH0-->|
575 *
576 * Based on the dot clock, the DSI divider clock calculates the parent
577 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
578 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
579 *
580 * OSC --> PLL5 --> FOUTPOSTDIV
581 */
582
583 if (!rate)
584 return -EINVAL;
585
586 vclk_rate = rzg2l_cpg_get_vclk_rate(hw, rate);
587 sipll5->foutpostdiv_rate =
588 rzg2l_cpg_get_foutpostdiv_rate(¶ms, vclk_rate);
589
590 /* Put PLL5 into standby mode */
591 writel(CPG_SIPLL5_STBY_RESETB_WEN, priv->base + CPG_SIPLL5_STBY);
592 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
593 !(val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
594 if (ret) {
595 dev_err(priv->dev, "failed to release pll5 lock");
596 return ret;
597 }
598
599 /* Output clock setting 1 */
600 writel((params.pl5_postdiv1 << 0) | (params.pl5_postdiv2 << 4) |
601 (params.pl5_refdiv << 8), priv->base + CPG_SIPLL5_CLK1);
602
603 /* Output clock setting, SSCG modulation value setting 3 */
604 writel((params.pl5_fracin << 8), priv->base + CPG_SIPLL5_CLK3);
605
606 /* Output clock setting 4 */
607 writel(CPG_SIPLL5_CLK4_RESV_LSB | (params.pl5_intin << 16),
608 priv->base + CPG_SIPLL5_CLK4);
609
610 /* Output clock setting 5 */
611 writel(params.pl5_spread, priv->base + CPG_SIPLL5_CLK5);
612
613 /* PLL normal mode setting */
614 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN | CPG_SIPLL5_STBY_SSCG_EN_WEN |
615 CPG_SIPLL5_STBY_RESETB_WEN | CPG_SIPLL5_STBY_RESETB,
616 priv->base + CPG_SIPLL5_STBY);
617
618 /* PLL normal mode transition, output clock stability check */
619 ret = readl_poll_timeout(priv->base + CPG_SIPLL5_MON, val,
620 (val & CPG_SIPLL5_MON_PLL5_LOCK), 100, 250000);
621 if (ret) {
622 dev_err(priv->dev, "failed to lock pll5");
623 return ret;
624 }
625
626 return 0;
627 }
628
/* SIPLL5: full reprogram-and-relock sequence on every set_rate() */
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
	.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
	.round_rate = rzg2l_cpg_sipll5_round_rate,
	.set_rate = rzg2l_cpg_sipll5_set_rate,
};
634
635 static struct clk * __init
rzg2l_cpg_sipll5_register(const struct cpg_core_clk * core,struct clk ** clks,struct rzg2l_cpg_priv * priv)636 rzg2l_cpg_sipll5_register(const struct cpg_core_clk *core,
637 struct clk **clks,
638 struct rzg2l_cpg_priv *priv)
639 {
640 const struct clk *parent;
641 struct clk_init_data init;
642 const char *parent_name;
643 struct sipll5 *sipll5;
644 struct clk_hw *clk_hw;
645 int ret;
646
647 parent = clks[core->parent & 0xffff];
648 if (IS_ERR(parent))
649 return ERR_CAST(parent);
650
651 sipll5 = devm_kzalloc(priv->dev, sizeof(*sipll5), GFP_KERNEL);
652 if (!sipll5)
653 return ERR_PTR(-ENOMEM);
654
655 init.name = core->name;
656 parent_name = __clk_get_name(parent);
657 init.ops = &rzg2l_cpg_sipll5_ops;
658 init.flags = 0;
659 init.parent_names = &parent_name;
660 init.num_parents = 1;
661
662 sipll5->hw.init = &init;
663 sipll5->conf = core->conf;
664 sipll5->priv = priv;
665
666 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN | CPG_SIPLL5_STBY_RESETB_WEN |
667 CPG_SIPLL5_STBY_RESETB, priv->base + CPG_SIPLL5_STBY);
668
669 clk_hw = &sipll5->hw;
670 clk_hw->init = &init;
671
672 ret = devm_clk_hw_register(priv->dev, clk_hw);
673 if (ret)
674 return ERR_PTR(ret);
675
676 priv->mux_dsi_div_params.clksrc = 1; /* Use clk src 1 for DSI */
677 priv->mux_dsi_div_params.dsi_div_a = 1; /* Divided by 2 */
678 priv->mux_dsi_div_params.dsi_div_b = 2; /* Divided by 3 */
679
680 return clk_hw->clk;
681 }
682
/**
 * struct pll_clk - SAM PLL clock hardware data
 *
 * @hw: handle between common and hardware-specific interfaces
 * @conf: configuration word (CLK1/CLK2 register offsets encoding)
 * @type: clock type (only CLK_TYPE_SAM_PLL has a computed rate)
 * @base: CPG register block base address
 * @priv: CPG private data
 */
struct pll_clk {
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
	void __iomem *base;
	struct rzg2l_cpg_priv *priv;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)
692
/*
 * Compute the SAM PLL output rate from the divider fields read back
 * from its two CLK registers:
 *
 *   rate = parent * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV)
 *
 * Non-SAM PLL types simply follow their parent.
 */
static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzg2l_cpg_priv *priv = pll_clk->priv;
	unsigned int val1, val2;
	u64 rate;

	if (pll_clk->type != CLK_TYPE_SAM_PLL)
		return parent_rate;

	val1 = readl(priv->base + GET_REG_SAMPLL_CLK1(pll_clk->conf));
	val2 = readl(priv->base + GET_REG_SAMPLL_CLK2(pll_clk->conf));

	/* (MDIV << 16) + KDIV is the 16.16 fixed-point multiplier */
	rate = mul_u64_u32_shr(parent_rate, (MDIV(val1) << 16) + KDIV(val1),
			       16 + SDIV(val2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(val1));
}
712
/* SAM PLL is read-only here: rate is derived from the HW registers */
static const struct clk_ops rzg2l_cpg_pll_ops = {
	.recalc_rate = rzg2l_cpg_pll_clk_recalc_rate,
};
716
717 static struct clk * __init
rzg2l_cpg_pll_clk_register(const struct cpg_core_clk * core,struct clk ** clks,void __iomem * base,struct rzg2l_cpg_priv * priv)718 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk *core,
719 struct clk **clks,
720 void __iomem *base,
721 struct rzg2l_cpg_priv *priv)
722 {
723 struct device *dev = priv->dev;
724 const struct clk *parent;
725 struct clk_init_data init;
726 const char *parent_name;
727 struct pll_clk *pll_clk;
728
729 parent = clks[core->parent & 0xffff];
730 if (IS_ERR(parent))
731 return ERR_CAST(parent);
732
733 pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
734 if (!pll_clk)
735 return ERR_PTR(-ENOMEM);
736
737 parent_name = __clk_get_name(parent);
738 init.name = core->name;
739 init.ops = &rzg2l_cpg_pll_ops;
740 init.flags = 0;
741 init.parent_names = &parent_name;
742 init.num_parents = 1;
743
744 pll_clk->hw.init = &init;
745 pll_clk->conf = core->conf;
746 pll_clk->base = base;
747 pll_clk->priv = priv;
748 pll_clk->type = core->type;
749
750 return clk_register(NULL, &pll_clk->hw);
751 }
752
/*
 * OF clock provider callback: translate a two-cell (type, index) DT
 * clock specifier into the corresponding entry of priv->clks[].
 */
static struct clk
*rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzg2l_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		/* last_dt_core_clk is the highest valid index, inclusive */
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type,
				clkidx);
			return ERR_PTR(-EINVAL);
		}
		/* module clocks are stored after the core clocks */
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}
797
/*
 * Register one core clock described by @core and store it in
 * priv->clks[core->id], dispatching on core->type to the matching
 * registration helper.  On failure the error is logged and the slot
 * keeps its previous ERR_PTR value.
 */
static void __init
rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
			    const struct rzg2l_cpg_info *info,
			    struct rzg2l_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	struct device *dev = priv->dev;
	unsigned int id = core->id, div = core->div;
	const char *parent_name;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!core->name) {
		/* Skip NULLified clock */
		return;
	}

	switch (core->type) {
	case CLK_TYPE_IN:
		/* External input clock, looked up in DT by name */
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk = clk_register_fixed_factor(NULL, core->name,
						parent_name, CLK_SET_RATE_PARENT,
						core->mult, div);
		break;
	case CLK_TYPE_SAM_PLL:
		clk = rzg2l_cpg_pll_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_SIPLL5:
		clk = rzg2l_cpg_sipll5_register(core, priv->clks, priv);
		break;
	case CLK_TYPE_DIV:
		clk = rzg2l_cpg_div_clk_register(core, priv->clks,
						 priv->base, priv);
		break;
	case CLK_TYPE_MUX:
		clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_SD_MUX:
		clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
		break;
	case CLK_TYPE_PLL5_4_MUX:
		clk = rzg2l_cpg_pll5_4_mux_clk_register(core, priv);
		break;
	case CLK_TYPE_DSI_DIV:
		clk = rzg2l_cpg_dsi_div_clk_register(core, priv->clks, priv);
		break;
	default:
		/* Unknown type: fall through to the error path (-EOPNOTSUPP) */
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "core",
		core->name, PTR_ERR(clk));
}
871
872 /**
873 * struct mstp_clock - MSTP gating clock
874 *
875 * @hw: handle between common and hardware-specific interfaces
876 * @off: register offset
877 * @bit: ON/MON bit
878 * @enabled: soft state of the clock, if it is coupled with another clock
879 * @priv: CPG/MSTP private data
880 * @sibling: pointer to the other coupled clock
881 */
882 struct mstp_clock {
883 struct clk_hw hw;
884 u16 off;
885 u8 bit;
886 bool enabled;
887 struct rzg2l_cpg_priv *priv;
888 struct mstp_clock *sibling;
889 };
890
891 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
892
/*
 * Gate or ungate a module clock.  The CLK_ON register uses the upper
 * halfword as a per-bit write-enable mask and the lower halfword as the
 * values, so only the targeted bit is touched.  When enabling on SoCs
 * that provide CLK_MON registers, poll the monitor bit until the clock
 * is confirmed running.
 */
static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	unsigned int reg = clock->off;
	struct device *dev = priv->dev;
	unsigned long flags;
	u32 bitmask = BIT(clock->bit);
	u32 value;
	int error;

	if (!clock->off) {
		/* off == 0 marks always-on clocks with no gate bit */
		dev_dbg(dev, "%pC does not support ON/OFF\n", hw->clk);
		return 0;
	}

	dev_dbg(dev, "CLK_ON %u/%pC %s\n", CLK_ON_R(reg), hw->clk,
		enable ? "ON" : "OFF");
	spin_lock_irqsave(&priv->rmw_lock, flags);

	if (enable)
		value = (bitmask << 16) | bitmask;
	else
		value = bitmask << 16;
	writel(value, priv->base + CLK_ON_R(reg));

	spin_unlock_irqrestore(&priv->rmw_lock, flags);

	if (!enable)
		return 0;

	if (!priv->info->has_clk_mon_regs)
		return 0;

	error = readl_poll_timeout_atomic(priv->base + CLK_MON_R(reg), value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + CLK_ON_R(reg));

	return error;
}
935
rzg2l_mod_clock_enable(struct clk_hw * hw)936 static int rzg2l_mod_clock_enable(struct clk_hw *hw)
937 {
938 struct mstp_clock *clock = to_mod_clock(hw);
939
940 if (clock->sibling) {
941 struct rzg2l_cpg_priv *priv = clock->priv;
942 unsigned long flags;
943 bool enabled;
944
945 spin_lock_irqsave(&priv->rmw_lock, flags);
946 enabled = clock->sibling->enabled;
947 clock->enabled = true;
948 spin_unlock_irqrestore(&priv->rmw_lock, flags);
949 if (enabled)
950 return 0;
951 }
952
953 return rzg2l_mod_clock_endisable(hw, true);
954 }
955
rzg2l_mod_clock_disable(struct clk_hw * hw)956 static void rzg2l_mod_clock_disable(struct clk_hw *hw)
957 {
958 struct mstp_clock *clock = to_mod_clock(hw);
959
960 if (clock->sibling) {
961 struct rzg2l_cpg_priv *priv = clock->priv;
962 unsigned long flags;
963 bool enabled;
964
965 spin_lock_irqsave(&priv->rmw_lock, flags);
966 enabled = clock->sibling->enabled;
967 clock->enabled = false;
968 spin_unlock_irqrestore(&priv->rmw_lock, flags);
969 if (enabled)
970 return;
971 }
972
973 rzg2l_mod_clock_endisable(hw, false);
974 }
975
rzg2l_mod_clock_is_enabled(struct clk_hw * hw)976 static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
977 {
978 struct mstp_clock *clock = to_mod_clock(hw);
979 struct rzg2l_cpg_priv *priv = clock->priv;
980 u32 bitmask = BIT(clock->bit);
981 u32 value;
982
983 if (!clock->off) {
984 dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
985 return 1;
986 }
987
988 if (clock->sibling)
989 return clock->enabled;
990
991 if (priv->info->has_clk_mon_regs)
992 value = readl(priv->base + CLK_MON_R(clock->off));
993 else
994 value = readl(priv->base + clock->off);
995
996 return value & bitmask;
997 }
998
/* MSTP gate clock operations (with coupled-clock bookkeeping) */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
1004
1005 static struct mstp_clock
rzg2l_mod_clock_get_sibling(struct mstp_clock * clock,struct rzg2l_cpg_priv * priv)1006 *rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
1007 struct rzg2l_cpg_priv *priv)
1008 {
1009 struct clk_hw *hw;
1010 unsigned int i;
1011
1012 for (i = 0; i < priv->num_mod_clks; i++) {
1013 struct mstp_clock *clk;
1014
1015 if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
1016 continue;
1017
1018 hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
1019 clk = to_mod_clock(hw);
1020 if (clock->off == clk->off && clock->bit == clk->bit)
1021 return clk;
1022 }
1023
1024 return NULL;
1025 }
1026
/*
 * Register one MSTP module clock described by @mod and store it in
 * priv->clks[mod->id].  Critical clocks listed in info->crit_mod_clks
 * get CLK_IS_CRITICAL; coupled clocks are cross-linked with their
 * sibling so the shared gate bit is reference-managed in software.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;

	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Clocks the system cannot run without are never gated */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto fail;

	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		/* Seed the soft state from the current HW gate state */
		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
1106
/* Convert an embedded reset_controller_dev back to its rzg2l_cpg_priv */
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
1108
rzg2l_cpg_assert(struct reset_controller_dev * rcdev,unsigned long id)1109 static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
1110 unsigned long id)
1111 {
1112 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1113 const struct rzg2l_cpg_info *info = priv->info;
1114 unsigned int reg = info->resets[id].off;
1115 u32 mask = BIT(info->resets[id].bit);
1116 s8 monbit = info->resets[id].monbit;
1117 u32 value = mask << 16;
1118
1119 dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
1120
1121 writel(value, priv->base + CLK_RST_R(reg));
1122
1123 if (info->has_clk_mon_regs) {
1124 reg = CLK_MRST_R(reg);
1125 } else if (monbit >= 0) {
1126 reg = CPG_RST_MON;
1127 mask = BIT(monbit);
1128 } else {
1129 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1130 udelay(35);
1131 return 0;
1132 }
1133
1134 return readl_poll_timeout_atomic(priv->base + reg, value,
1135 value & mask, 10, 200);
1136 }
1137
rzg2l_cpg_deassert(struct reset_controller_dev * rcdev,unsigned long id)1138 static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
1139 unsigned long id)
1140 {
1141 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1142 const struct rzg2l_cpg_info *info = priv->info;
1143 unsigned int reg = info->resets[id].off;
1144 u32 mask = BIT(info->resets[id].bit);
1145 s8 monbit = info->resets[id].monbit;
1146 u32 value = (mask << 16) | mask;
1147
1148 dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
1149 CLK_RST_R(reg));
1150
1151 writel(value, priv->base + CLK_RST_R(reg));
1152
1153 if (info->has_clk_mon_regs) {
1154 reg = CLK_MRST_R(reg);
1155 } else if (monbit >= 0) {
1156 reg = CPG_RST_MON;
1157 mask = BIT(monbit);
1158 } else {
1159 /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
1160 udelay(35);
1161 return 0;
1162 }
1163
1164 return readl_poll_timeout_atomic(priv->base + reg, value,
1165 !(value & mask), 10, 200);
1166 }
1167
/* Pulse the reset line: assert then deassert, propagating the first error. */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret = rzg2l_cpg_assert(rcdev, id);

	return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
1179
rzg2l_cpg_status(struct reset_controller_dev * rcdev,unsigned long id)1180 static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
1181 unsigned long id)
1182 {
1183 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1184 const struct rzg2l_cpg_info *info = priv->info;
1185 s8 monbit = info->resets[id].monbit;
1186 unsigned int reg;
1187 u32 bitmask;
1188
1189 if (info->has_clk_mon_regs) {
1190 reg = CLK_MRST_R(info->resets[id].off);
1191 bitmask = BIT(info->resets[id].bit);
1192 } else if (monbit >= 0) {
1193 reg = CPG_RST_MON;
1194 bitmask = BIT(monbit);
1195 } else {
1196 return -ENOTSUPP;
1197 }
1198
1199 return !!(readl(priv->base + reg) & bitmask);
1200 }
1201
/* Reset-controller callbacks backed by the CPG reset/monitor registers */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
1208
rzg2l_cpg_reset_xlate(struct reset_controller_dev * rcdev,const struct of_phandle_args * reset_spec)1209 static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
1210 const struct of_phandle_args *reset_spec)
1211 {
1212 struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
1213 const struct rzg2l_cpg_info *info = priv->info;
1214 unsigned int id = reset_spec->args[0];
1215
1216 if (id >= rcdev->nr_resets || !info->resets[id].off) {
1217 dev_err(rcdev->dev, "Invalid reset index %u\n", id);
1218 return -EINVAL;
1219 }
1220
1221 return id;
1222 }
1223
rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv * priv)1224 static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
1225 {
1226 priv->rcdev.ops = &rzg2l_cpg_reset_ops;
1227 priv->rcdev.of_node = priv->dev->of_node;
1228 priv->rcdev.dev = priv->dev;
1229 priv->rcdev.of_reset_n_cells = 1;
1230 priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
1231 priv->rcdev.nr_resets = priv->num_resets;
1232
1233 return devm_reset_controller_register(priv->dev, &priv->rcdev);
1234 }
1235
rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv * priv,const struct of_phandle_args * clkspec)1236 static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
1237 const struct of_phandle_args *clkspec)
1238 {
1239 const struct rzg2l_cpg_info *info = priv->info;
1240 unsigned int id;
1241 unsigned int i;
1242
1243 if (clkspec->args_count != 2)
1244 return false;
1245
1246 if (clkspec->args[0] != CPG_MOD)
1247 return false;
1248
1249 id = clkspec->args[1] + info->num_total_core_clks;
1250 for (i = 0; i < info->num_no_pm_mod_clks; i++) {
1251 if (info->no_pm_mod_clks[i] == id)
1252 return false;
1253 }
1254
1255 return true;
1256 }
1257
/*
 * rzg2l_cpg_attach_dev - genpd attach callback
 *
 * Walk the consumer device's "clocks" phandles and register every
 * PM-manageable module clock (see rzg2l_cpg_is_pm_clk()) with the pm_clk
 * framework so runtime PM can gate it.  The pm_clk list is created lazily
 * on the first qualifying clock.  Returns 0 or a negative errno; on
 * failure the partially built pm_clk list is destroyed again.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_priv *priv = container_of(domain, struct rzg2l_cpg_priv, genpd);
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;	/* create the pm_clk list only once */
	struct clk *clk;
	int error;
	int i = 0;

	/* Iterate all "clocks" specifiers; parsing fails at end of list */
	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			/* Node reference from of_parse_phandle_with_args() */
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
1308
/* genpd detach callback: drop the pm_clk list built by the attach callback */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}
1314
/* devm action: tear down the genpd registered by rzg2l_cpg_add_clk_domain() */
static void rzg2l_cpg_genpd_remove(void *data)
{
	pm_genpd_remove(data);
}
1319
/*
 * Register an always-on clock PM domain for the CPG node and expose it as
 * a simple genpd provider, so consumer devices get their module clocks
 * managed via pm_clk on attach/detach.
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct generic_pm_domain *genpd = &priv->genpd;
	int ret;

	genpd->name = np->name;
	genpd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ALWAYS_ON |
		       GENPD_FLAG_ACTIVE_WAKEUP;
	genpd->attach_dev = rzg2l_cpg_attach_dev;
	genpd->detach_dev = rzg2l_cpg_detach_dev;
	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	/* Make sure the genpd is removed again on probe failure / unbind */
	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, genpd);
}
1342
/*
 * rzg2l_cpg_probe - map the CPG block and register all clocks, the clock
 * provider, the clock PM domain and the reset controller for the matched SoC.
 *
 * All clk slots start as ERR_PTR(-ENOENT); the registration helpers replace
 * the slots they succeed on, so lookups of missing clocks fail cleanly.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	/* Not NULL-checked: every match-table entry below supplies .data */
	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* One slot per possible core clock plus per possible module clock */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	/* Core clocks first: module clock parents are looked up in priv->clks[] */
	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	/* Remove the provider again on probe failure / unbind */
	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_clk_domain(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
1406
/* DT match table: each SoC entry is compiled in only when its Kconfig
 * option is enabled; .data points at the per-SoC CPG description. */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
1434
/* No .probe/.remove here: the driver is bound once via
 * platform_driver_probe() from rzg2l_cpg_init() below. */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table = rzg2l_cpg_match,
	},
};
1441
/* Registered at subsys_initcall level so the CPG clocks are available
 * before the device initcalls of their consumers run. */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}

subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");