1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
4  * Author: Olivier Bideau <olivier.bideau@st.com> for STMicroelectronics.
5  * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/clk-provider.h>
10 #include <linux/delay.h>
11 #include <linux/err.h>
12 #include <linux/io.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/platform_device.h>
17 #include <linux/reset-controller.h>
18 #include <linux/slab.h>
19 #include <linux/spinlock.h>
20 
21 #include <dt-bindings/clock/stm32mp1-clks.h>
22 
23 static DEFINE_SPINLOCK(rlock);
24 
25 #define RCC_OCENSETR		0x0C
26 #define RCC_HSICFGR		0x18
27 #define RCC_RDLSICR		0x144
28 #define RCC_PLL1CR		0x80
29 #define RCC_PLL1CFGR1		0x84
30 #define RCC_PLL1CFGR2		0x88
31 #define RCC_PLL2CR		0x94
32 #define RCC_PLL2CFGR1		0x98
33 #define RCC_PLL2CFGR2		0x9C
34 #define RCC_PLL3CR		0x880
35 #define RCC_PLL3CFGR1		0x884
36 #define RCC_PLL3CFGR2		0x888
37 #define RCC_PLL4CR		0x894
38 #define RCC_PLL4CFGR1		0x898
39 #define RCC_PLL4CFGR2		0x89C
40 #define RCC_APB1ENSETR		0xA00
41 #define RCC_APB2ENSETR		0xA08
42 #define RCC_APB3ENSETR		0xA10
43 #define RCC_APB4ENSETR		0x200
44 #define RCC_APB5ENSETR		0x208
45 #define RCC_AHB2ENSETR		0xA18
46 #define RCC_AHB3ENSETR		0xA20
47 #define RCC_AHB4ENSETR		0xA28
48 #define RCC_AHB5ENSETR		0x210
49 #define RCC_AHB6ENSETR		0x218
50 #define RCC_AHB6LPENSETR	0x318
51 #define RCC_RCK12SELR		0x28
52 #define RCC_RCK3SELR		0x820
53 #define RCC_RCK4SELR		0x824
54 #define RCC_MPCKSELR		0x20
55 #define RCC_ASSCKSELR		0x24
56 #define RCC_MSSCKSELR		0x48
57 #define RCC_SPI6CKSELR		0xC4
58 #define RCC_SDMMC12CKSELR	0x8F4
59 #define RCC_SDMMC3CKSELR	0x8F8
60 #define RCC_FMCCKSELR		0x904
61 #define RCC_I2C46CKSELR		0xC0
62 #define RCC_I2C12CKSELR		0x8C0
63 #define RCC_I2C35CKSELR		0x8C4
64 #define RCC_UART1CKSELR		0xC8
65 #define RCC_QSPICKSELR		0x900
66 #define RCC_ETHCKSELR		0x8FC
67 #define RCC_RNG1CKSELR		0xCC
68 #define RCC_RNG2CKSELR		0x920
69 #define RCC_GPUCKSELR		0x938
70 #define RCC_USBCKSELR		0x91C
71 #define RCC_STGENCKSELR		0xD4
72 #define RCC_SPDIFCKSELR		0x914
73 #define RCC_SPI2S1CKSELR	0x8D8
74 #define RCC_SPI2S23CKSELR	0x8DC
75 #define RCC_SPI2S45CKSELR	0x8E0
76 #define RCC_CECCKSELR		0x918
77 #define RCC_LPTIM1CKSELR	0x934
78 #define RCC_LPTIM23CKSELR	0x930
79 #define RCC_LPTIM45CKSELR	0x92C
80 #define RCC_UART24CKSELR	0x8E8
81 #define RCC_UART35CKSELR	0x8EC
82 #define RCC_UART6CKSELR		0x8E4
83 #define RCC_UART78CKSELR	0x8F0
84 #define RCC_FDCANCKSELR		0x90C
85 #define RCC_SAI1CKSELR		0x8C8
86 #define RCC_SAI2CKSELR		0x8CC
87 #define RCC_SAI3CKSELR		0x8D0
88 #define RCC_SAI4CKSELR		0x8D4
89 #define RCC_ADCCKSELR		0x928
90 #define RCC_MPCKDIVR		0x2C
91 #define RCC_DSICKSELR		0x924
92 #define RCC_CPERCKSELR		0xD0
93 #define RCC_MCO1CFGR		0x800
94 #define RCC_MCO2CFGR		0x804
95 #define RCC_BDCR		0x140
96 #define RCC_AXIDIVR		0x30
97 #define RCC_MCUDIVR		0x830
98 #define RCC_APB1DIVR		0x834
99 #define RCC_APB2DIVR		0x838
100 #define RCC_APB3DIVR		0x83C
101 #define RCC_APB4DIVR		0x3C
102 #define RCC_APB5DIVR		0x40
103 #define RCC_TIMG1PRER		0x828
104 #define RCC_TIMG2PRER		0x82C
105 #define RCC_RTCDIVR		0x44
106 #define RCC_DBGCFGR		0x80C
107 
108 #define RCC_CLR	0x4
109 
110 static const char * const ref12_parents[] = {
111 	"ck_hsi", "ck_hse"
112 };
113 
114 static const char * const ref3_parents[] = {
115 	"ck_hsi", "ck_hse", "ck_csi"
116 };
117 
118 static const char * const ref4_parents[] = {
119 	"ck_hsi", "ck_hse", "ck_csi"
120 };
121 
122 static const char * const cpu_src[] = {
123 	"ck_hsi", "ck_hse", "pll1_p"
124 };
125 
126 static const char * const axi_src[] = {
127 	"ck_hsi", "ck_hse", "pll2_p"
128 };
129 
130 static const char * const per_src[] = {
131 	"ck_hsi", "ck_csi", "ck_hse"
132 };
133 
134 static const char * const mcu_src[] = {
135 	"ck_hsi", "ck_hse", "ck_csi", "pll3_p"
136 };
137 
138 static const char * const sdmmc12_src[] = {
139 	"ck_axi", "pll3_r", "pll4_p", "ck_hsi"
140 };
141 
142 static const char * const sdmmc3_src[] = {
143 	"ck_mcu", "pll3_r", "pll4_p", "ck_hsi"
144 };
145 
146 static const char * const fmc_src[] = {
147 	"ck_axi", "pll3_r", "pll4_p", "ck_per"
148 };
149 
150 static const char * const qspi_src[] = {
151 	"ck_axi", "pll3_r", "pll4_p", "ck_per"
152 };
153 
154 static const char * const eth_src[] = {
155 	"pll4_p", "pll3_q"
156 };
157 
158 static const struct clk_parent_data ethrx_src[] = {
159 	{ .name = "ethck_k", .fw_name = "ETH_RX_CLK/ETH_REF_CLK" },
160 };
161 
162 static const char * const rng_src[] = {
163 	"ck_csi", "pll4_r", "ck_lse", "ck_lsi"
164 };
165 
166 static const char * const usbphy_src[] = {
167 	"ck_hse", "pll4_r", "clk-hse-div2"
168 };
169 
170 static const char * const usbo_src[] = {
171 	"pll4_r", "ck_usbo_48m"
172 };
173 
174 static const char * const stgen_src[] = {
175 	"ck_hsi", "ck_hse"
176 };
177 
178 static const char * const spdif_src[] = {
179 	"pll4_p", "pll3_q", "ck_hsi"
180 };
181 
182 static const char * const spi123_src[] = {
183 	"pll4_p", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
184 };
185 
186 static const char * const spi45_src[] = {
187 	"pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
188 };
189 
190 static const char * const spi6_src[] = {
191 	"pclk5", "pll4_q", "ck_hsi", "ck_csi", "ck_hse", "pll3_q"
192 };
193 
194 static const char * const cec_src[] = {
195 	"ck_lse", "ck_lsi", "ck_csi"
196 };
197 
198 static const char * const i2c12_src[] = {
199 	"pclk1", "pll4_r", "ck_hsi", "ck_csi"
200 };
201 
202 static const char * const i2c35_src[] = {
203 	"pclk1", "pll4_r", "ck_hsi", "ck_csi"
204 };
205 
206 static const char * const i2c46_src[] = {
207 	"pclk5", "pll3_q", "ck_hsi", "ck_csi"
208 };
209 
210 static const char * const lptim1_src[] = {
211 	"pclk1", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
212 };
213 
214 static const char * const lptim23_src[] = {
215 	"pclk3", "pll4_q", "ck_per", "ck_lse", "ck_lsi"
216 };
217 
218 static const char * const lptim45_src[] = {
219 	"pclk3", "pll4_p", "pll3_q", "ck_lse", "ck_lsi", "ck_per"
220 };
221 
222 static const char * const usart1_src[] = {
223 	"pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
224 };
225 
226 static const char * const usart234578_src[] = {
227 	"pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
228 };
229 
230 static const char * const usart6_src[] = {
231 	"pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
232 };
233 
234 static const char * const fdcan_src[] = {
235 	"ck_hse", "pll3_q", "pll4_q", "pll4_r"
236 };
237 
238 static const char * const sai_src[] = {
239 	"pll4_q", "pll3_q", "i2s_ckin", "ck_per", "pll3_r"
240 };
241 
242 static const char * const sai2_src[] = {
243 	"pll4_q", "pll3_q", "i2s_ckin", "ck_per", "spdif_ck_symb", "pll3_r"
244 };
245 
246 static const char * const adc12_src[] = {
247 	"pll4_r", "ck_per", "pll3_q"
248 };
249 
250 static const char * const dsi_src[] = {
251 	"ck_dsi_phy", "pll4_p"
252 };
253 
254 static const char * const rtc_src[] = {
255 	"off", "ck_lse", "ck_lsi", "ck_hse"
256 };
257 
258 static const char * const mco1_src[] = {
259 	"ck_hsi", "ck_hse", "ck_csi", "ck_lsi", "ck_lse"
260 };
261 
262 static const char * const mco2_src[] = {
263 	"ck_mpu", "ck_axi", "ck_mcu", "pll4_p", "ck_hse", "ck_hsi"
264 };
265 
266 static const char * const ck_trace_src[] = {
267 	"ck_axi"
268 };
269 
270 static const struct clk_div_table axi_div_table[] = {
271 	{ 0, 1 }, { 1, 2 }, { 2, 3 }, { 3, 4 },
272 	{ 4, 4 }, { 5, 4 }, { 6, 4 }, { 7, 4 },
273 	{ 0 },
274 };
275 
276 static const struct clk_div_table mcu_div_table[] = {
277 	{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
278 	{ 4, 16 }, { 5, 32 }, { 6, 64 }, { 7, 128 },
279 	{ 8, 256 }, { 9, 512 }, { 10, 512 }, { 11, 512 },
280 	{ 12, 512 }, { 13, 512 }, { 14, 512 }, { 15, 512 },
281 	{ 0 },
282 };
283 
284 static const struct clk_div_table apb_div_table[] = {
285 	{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
286 	{ 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
287 	{ 0 },
288 };
289 
290 static const struct clk_div_table ck_trace_div_table[] = {
291 	{ 0, 1 }, { 1, 2 }, { 2, 4 }, { 3, 8 },
292 	{ 4, 16 }, { 5, 16 }, { 6, 16 }, { 7, 16 },
293 	{ 0 },
294 };
295 
296 #define MAX_MUX_CLK 2
297 
298 struct stm32_mmux {
299 	u8 nbr_clk;
300 	struct clk_hw *hws[MAX_MUX_CLK];
301 };
302 
303 struct stm32_clk_mmux {
304 	struct clk_mux mux;
305 	struct stm32_mmux *mmux;
306 };
307 
308 struct stm32_mgate {
309 	u8 nbr_clk;
310 	u32 flag;
311 };
312 
313 struct stm32_clk_mgate {
314 	struct clk_gate gate;
315 	struct stm32_mgate *mgate;
316 	u32 mask;
317 };
318 
319 struct clock_config {
320 	u32 id;
321 	const char *name;
322 	const char *parent_name;
323 	const char * const *parent_names;
324 	const struct clk_parent_data *parent_data;
325 	int num_parents;
326 	unsigned long flags;
327 	void *cfg;
328 	struct clk_hw * (*func)(struct device *dev,
329 				struct clk_hw_onecell_data *clk_data,
330 				void __iomem *base, spinlock_t *lock,
331 				const struct clock_config *cfg);
332 };
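
/*
 * Each entry in the clock table below is one of these clock_config records:
 * the generic fields (id, name, parent(s), flags) plus a type-specific cfg
 * blob and a func callback that knows how to register that clock type at
 * probe time. The GATE()/DIV()/MUX()/PLL()/COMPOSITE() macros further down
 * only fill in these structures.
 */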
333 
334 #define NO_ID ~0
335 
336 struct gate_cfg {
337 	u32 reg_off;
338 	u8 bit_idx;
339 	u8 gate_flags;
340 };
341 
342 struct fixed_factor_cfg {
343 	unsigned int mult;
344 	unsigned int div;
345 };
346 
347 struct div_cfg {
348 	u32 reg_off;
349 	u8 shift;
350 	u8 width;
351 	u8 div_flags;
352 	const struct clk_div_table *table;
353 };
354 
355 struct mux_cfg {
356 	u32 reg_off;
357 	u8 shift;
358 	u8 width;
359 	u8 mux_flags;
360 	u32 *table;
361 };
362 
363 struct stm32_gate_cfg {
364 	struct gate_cfg		*gate;
365 	struct stm32_mgate	*mgate;
366 	const struct clk_ops	*ops;
367 };
368 
369 struct stm32_div_cfg {
370 	struct div_cfg		*div;
371 	const struct clk_ops	*ops;
372 };
373 
374 struct stm32_mux_cfg {
375 	struct mux_cfg		*mux;
376 	struct stm32_mmux	*mmux;
377 	const struct clk_ops	*ops;
378 };
379 
380 /* STM32 Composite clock */
381 struct stm32_composite_cfg {
382 	const struct stm32_gate_cfg	*gate;
383 	const struct stm32_div_cfg	*div;
384 	const struct stm32_mux_cfg	*mux;
385 };
386 
387 static struct clk_hw *
388 _clk_hw_register_gate(struct device *dev,
389 		      struct clk_hw_onecell_data *clk_data,
390 		      void __iomem *base, spinlock_t *lock,
391 		      const struct clock_config *cfg)
392 {
393 	struct gate_cfg *gate_cfg = cfg->cfg;
394 
395 	return clk_hw_register_gate(dev,
396 				    cfg->name,
397 				    cfg->parent_name,
398 				    cfg->flags,
399 				    gate_cfg->reg_off + base,
400 				    gate_cfg->bit_idx,
401 				    gate_cfg->gate_flags,
402 				    lock);
403 }
404 
405 static struct clk_hw *
406 _clk_hw_register_fixed_factor(struct device *dev,
407 			      struct clk_hw_onecell_data *clk_data,
408 			      void __iomem *base, spinlock_t *lock,
409 			      const struct clock_config *cfg)
410 {
411 	struct fixed_factor_cfg *ff_cfg = cfg->cfg;
412 
413 	return clk_hw_register_fixed_factor(dev, cfg->name, cfg->parent_name,
414 					    cfg->flags, ff_cfg->mult,
415 					    ff_cfg->div);
416 }
417 
418 static struct clk_hw *
419 _clk_hw_register_divider_table(struct device *dev,
420 			       struct clk_hw_onecell_data *clk_data,
421 			       void __iomem *base, spinlock_t *lock,
422 			       const struct clock_config *cfg)
423 {
424 	struct div_cfg *div_cfg = cfg->cfg;
425 
426 	return clk_hw_register_divider_table(dev,
427 					     cfg->name,
428 					     cfg->parent_name,
429 					     cfg->flags,
430 					     div_cfg->reg_off + base,
431 					     div_cfg->shift,
432 					     div_cfg->width,
433 					     div_cfg->div_flags,
434 					     div_cfg->table,
435 					     lock);
436 }
437 
438 static struct clk_hw *
439 _clk_hw_register_mux(struct device *dev,
440 		     struct clk_hw_onecell_data *clk_data,
441 		     void __iomem *base, spinlock_t *lock,
442 		     const struct clock_config *cfg)
443 {
444 	struct mux_cfg *mux_cfg = cfg->cfg;
445 
446 	return clk_hw_register_mux(dev, cfg->name, cfg->parent_names,
447 				   cfg->num_parents, cfg->flags,
448 				   mux_cfg->reg_off + base, mux_cfg->shift,
449 				   mux_cfg->width, mux_cfg->mux_flags, lock);
450 }
451 
452 /* MP1 Gate clock with set & clear registers */
453 
454 static int mp1_gate_clk_enable(struct clk_hw *hw)
455 {
456 	if (!clk_gate_ops.is_enabled(hw))
457 		clk_gate_ops.enable(hw);
458 
459 	return 0;
460 }
461 
462 static void mp1_gate_clk_disable(struct clk_hw *hw)
463 {
464 	struct clk_gate *gate = to_clk_gate(hw);
465 	unsigned long flags = 0;
466 
467 	if (clk_gate_ops.is_enabled(hw)) {
468 		spin_lock_irqsave(gate->lock, flags);
469 		writel_relaxed(BIT(gate->bit_idx), gate->reg + RCC_CLR);
470 		spin_unlock_irqrestore(gate->lock, flags);
471 	}
472 }
473 
474 static const struct clk_ops mp1_gate_clk_ops = {
475 	.enable		= mp1_gate_clk_enable,
476 	.disable	= mp1_gate_clk_disable,
477 	.is_enabled	= clk_gate_is_enabled,
478 };
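
/*
 * The MP1 RCC exposes peripheral enables as SET/CLEAR register pairs:
 * writing a 1 to a bit of the xxxSETR register enables the clock, and
 * writing a 1 to the same bit at offset +RCC_CLR (+0x4) clears it. That is
 * why mp1_gate_clk_disable() writes BIT(bit_idx) to gate->reg + RCC_CLR
 * instead of doing a read-modify-write.
 */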
479 
480 static struct clk_hw *_get_stm32_mux(struct device *dev, void __iomem *base,
481 				     const struct stm32_mux_cfg *cfg,
482 				     spinlock_t *lock)
483 {
484 	struct stm32_clk_mmux *mmux;
485 	struct clk_mux *mux;
486 	struct clk_hw *mux_hw;
487 
488 	if (cfg->mmux) {
489 		mmux = devm_kzalloc(dev, sizeof(*mmux), GFP_KERNEL);
490 		if (!mmux)
491 			return ERR_PTR(-ENOMEM);
492 
493 		mmux->mux.reg = cfg->mux->reg_off + base;
494 		mmux->mux.shift = cfg->mux->shift;
495 		mmux->mux.mask = (1 << cfg->mux->width) - 1;
496 		mmux->mux.flags = cfg->mux->mux_flags;
497 		mmux->mux.table = cfg->mux->table;
498 		mmux->mux.lock = lock;
499 		mmux->mmux = cfg->mmux;
500 		mux_hw = &mmux->mux.hw;
501 		cfg->mmux->hws[cfg->mmux->nbr_clk++] = mux_hw;
502 
503 	} else {
504 		mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
505 		if (!mux)
506 			return ERR_PTR(-ENOMEM);
507 
508 		mux->reg = cfg->mux->reg_off + base;
509 		mux->shift = cfg->mux->shift;
510 		mux->mask = (1 << cfg->mux->width) - 1;
511 		mux->flags = cfg->mux->mux_flags;
512 		mux->table = cfg->mux->table;
513 		mux->lock = lock;
514 		mux_hw = &mux->hw;
515 	}
516 
517 	return mux_hw;
518 }
519 
520 static struct clk_hw *_get_stm32_div(struct device *dev, void __iomem *base,
521 				     const struct stm32_div_cfg *cfg,
522 				     spinlock_t *lock)
523 {
524 	struct clk_divider *div;
525 
526 	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
527 
528 	if (!div)
529 		return ERR_PTR(-ENOMEM);
530 
531 	div->reg = cfg->div->reg_off + base;
532 	div->shift = cfg->div->shift;
533 	div->width = cfg->div->width;
534 	div->flags = cfg->div->div_flags;
535 	div->table = cfg->div->table;
536 	div->lock = lock;
537 
538 	return &div->hw;
539 }
540 
541 static struct clk_hw *_get_stm32_gate(struct device *dev, void __iomem *base,
542 				      const struct stm32_gate_cfg *cfg,
543 				      spinlock_t *lock)
544 {
545 	struct stm32_clk_mgate *mgate;
546 	struct clk_gate *gate;
547 	struct clk_hw *gate_hw;
548 
549 	if (cfg->mgate) {
550 		mgate = devm_kzalloc(dev, sizeof(*mgate), GFP_KERNEL);
551 		if (!mgate)
552 			return ERR_PTR(-ENOMEM);
553 
554 		mgate->gate.reg = cfg->gate->reg_off + base;
555 		mgate->gate.bit_idx = cfg->gate->bit_idx;
556 		mgate->gate.flags = cfg->gate->gate_flags;
557 		mgate->gate.lock = lock;
558 		mgate->mask = BIT(cfg->mgate->nbr_clk++);
559 
560 		mgate->mgate = cfg->mgate;
561 
562 		gate_hw = &mgate->gate.hw;
563 
564 	} else {
565 		gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
566 		if (!gate)
567 			return ERR_PTR(-ENOMEM);
568 
569 		gate->reg = cfg->gate->reg_off + base;
570 		gate->bit_idx = cfg->gate->bit_idx;
571 		gate->flags = cfg->gate->gate_flags;
572 		gate->lock = lock;
573 
574 		gate_hw = &gate->hw;
575 	}
576 
577 	return gate_hw;
578 }
579 
580 static struct clk_hw *
581 clk_stm32_register_gate_ops(struct device *dev,
582 			    const char *name,
583 			    const char *parent_name,
584 			    const struct clk_parent_data *parent_data,
585 			    unsigned long flags,
586 			    void __iomem *base,
587 			    const struct stm32_gate_cfg *cfg,
588 			    spinlock_t *lock)
589 {
590 	struct clk_init_data init = { NULL };
591 	struct clk_hw *hw;
592 	int ret;
593 
594 	init.name = name;
595 	if (parent_name)
596 		init.parent_names = &parent_name;
597 	if (parent_data)
598 		init.parent_data = parent_data;
599 	init.num_parents = 1;
600 	init.flags = flags;
601 
602 	init.ops = &clk_gate_ops;
603 
604 	if (cfg->ops)
605 		init.ops = cfg->ops;
606 
607 	hw = _get_stm32_gate(dev, base, cfg, lock);
608 	if (IS_ERR(hw))
609 		return ERR_PTR(-ENOMEM);
610 
611 	hw->init = &init;
612 
613 	ret = clk_hw_register(dev, hw);
614 	if (ret)
615 		hw = ERR_PTR(ret);
616 
617 	return hw;
618 }
619 
620 static struct clk_hw *
621 clk_stm32_register_composite(struct device *dev,
622 			     const char *name, const char * const *parent_names,
623 			     const struct clk_parent_data *parent_data,
624 			     int num_parents, void __iomem *base,
625 			     const struct stm32_composite_cfg *cfg,
626 			     unsigned long flags, spinlock_t *lock)
627 {
628 	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
629 	struct clk_hw *mux_hw, *div_hw, *gate_hw;
630 
631 	mux_hw = NULL;
632 	div_hw = NULL;
633 	gate_hw = NULL;
634 	mux_ops = NULL;
635 	div_ops = NULL;
636 	gate_ops = NULL;
637 
638 	if (cfg->mux) {
639 		mux_hw = _get_stm32_mux(dev, base, cfg->mux, lock);
640 
641 		if (!IS_ERR(mux_hw)) {
642 			mux_ops = &clk_mux_ops;
643 
644 			if (cfg->mux->ops)
645 				mux_ops = cfg->mux->ops;
646 		}
647 	}
648 
649 	if (cfg->div) {
650 		div_hw = _get_stm32_div(dev, base, cfg->div, lock);
651 
652 		if (!IS_ERR(div_hw)) {
653 			div_ops = &clk_divider_ops;
654 
655 			if (cfg->div->ops)
656 				div_ops = cfg->div->ops;
657 		}
658 	}
659 
660 	if (cfg->gate) {
661 		gate_hw = _get_stm32_gate(dev, base, cfg->gate, lock);
662 
663 		if (!IS_ERR(gate_hw)) {
664 			gate_ops = &clk_gate_ops;
665 
666 			if (cfg->gate->ops)
667 				gate_ops = cfg->gate->ops;
668 		}
669 	}
670 
671 	return clk_hw_register_composite(dev, name, parent_names, num_parents,
672 				       mux_hw, mux_ops, div_hw, div_ops,
673 				       gate_hw, gate_ops, flags);
674 }
675 
676 #define to_clk_mgate(_gate) container_of(_gate, struct stm32_clk_mgate, gate)
677 
678 static int mp1_mgate_clk_enable(struct clk_hw *hw)
679 {
680 	struct clk_gate *gate = to_clk_gate(hw);
681 	struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
682 
683 	clk_mgate->mgate->flag |= clk_mgate->mask;
684 
685 	mp1_gate_clk_enable(hw);
686 
687 	return 0;
688 }
689 
690 static void mp1_mgate_clk_disable(struct clk_hw *hw)
691 {
692 	struct clk_gate *gate = to_clk_gate(hw);
693 	struct stm32_clk_mgate *clk_mgate = to_clk_mgate(gate);
694 
695 	clk_mgate->mgate->flag &= ~clk_mgate->mask;
696 
697 	if (clk_mgate->mgate->flag == 0)
698 		mp1_gate_clk_disable(hw);
699 }
700 
701 static const struct clk_ops mp1_mgate_clk_ops = {
702 	.enable		= mp1_mgate_clk_enable,
703 	.disable	= mp1_mgate_clk_disable,
704 	.is_enabled	= clk_gate_is_enabled,
705 
706 };
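
/*
 * A "multi gate" is a single enable bit shared by several kernel clocks.
 * Each sharing clock owns one bit of mgate->flag: enable sets the caller's
 * bit and switches the hardware gate on, disable clears it and only turns
 * the hardware gate off once no sharing clock is left enabled.
 */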
707 
708 #define to_clk_mmux(_mux) container_of(_mux, struct stm32_clk_mmux, mux)
709 
710 static u8 clk_mmux_get_parent(struct clk_hw *hw)
711 {
712 	return clk_mux_ops.get_parent(hw);
713 }
714 
715 static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
716 {
717 	struct clk_mux *mux = to_clk_mux(hw);
718 	struct stm32_clk_mmux *clk_mmux = to_clk_mmux(mux);
719 	struct clk_hw *hwp;
720 	int ret, n;
721 
722 	ret = clk_mux_ops.set_parent(hw, index);
723 	if (ret)
724 		return ret;
725 
726 	hwp = clk_hw_get_parent(hw);
727 
728 	for (n = 0; n < clk_mmux->mmux->nbr_clk; n++)
729 		if (clk_mmux->mmux->hws[n] != hw)
730 			clk_hw_reparent(clk_mmux->mmux->hws[n], hwp);
731 
732 	return 0;
733 }
734 
735 static const struct clk_ops clk_mmux_ops = {
736 	.get_parent	= clk_mmux_get_parent,
737 	.set_parent	= clk_mmux_set_parent,
738 	.determine_rate	= __clk_mux_determine_rate,
739 };
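
/*
 * A "multi mux" is one CKSELR field shared by several clocks (for example
 * the two SDMMC12 kernel clocks, which share RCC_SDMMC12CKSELR). When one
 * of them changes parent, the sibling clk_hws recorded in mmux->hws are
 * reparented too, so the clock framework's view of the tree stays
 * consistent with the single hardware field.
 */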
740 
741 /* STM32 PLL */
742 struct stm32_pll_obj {
743 	/* lock pll enable/disable registers */
744 	spinlock_t *lock;
745 	void __iomem *reg;
746 	struct clk_hw hw;
747 	struct clk_mux mux;
748 };
749 
750 #define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
751 
752 #define PLL_ON		BIT(0)
753 #define PLL_RDY		BIT(1)
754 #define DIVN_MASK	0x1FF
755 #define DIVM_MASK	0x3F
756 #define DIVM_SHIFT	16
757 #define DIVN_SHIFT	0
758 #define FRAC_OFFSET	0xC
759 #define FRAC_MASK	0x1FFF
760 #define FRAC_SHIFT	3
761 #define FRACLE		BIT(16)
762 #define PLL_MUX_SHIFT	0
763 #define PLL_MUX_MASK	3
764 
765 static int __pll_is_enabled(struct clk_hw *hw)
766 {
767 	struct stm32_pll_obj *clk_elem = to_pll(hw);
768 
769 	return readl_relaxed(clk_elem->reg) & PLL_ON;
770 }
771 
772 #define TIMEOUT 5
773 
774 static int pll_enable(struct clk_hw *hw)
775 {
776 	struct stm32_pll_obj *clk_elem = to_pll(hw);
777 	u32 reg;
778 	unsigned long flags = 0;
779 	unsigned int timeout = TIMEOUT;
780 	int bit_status = 0;
781 
782 	spin_lock_irqsave(clk_elem->lock, flags);
783 
784 	if (__pll_is_enabled(hw))
785 		goto unlock;
786 
787 	reg = readl_relaxed(clk_elem->reg);
788 	reg |= PLL_ON;
789 	writel_relaxed(reg, clk_elem->reg);
790 
791 	/* We can't use readl_poll_timeout() here because we could be blocked
792 	 * if someone enables this clock before the clocksource has switched.
793 	 * Only the jiffies counter would be usable then, but jiffies advance
794 	 * via interrupts and the enable op runs with interrupts disabled.
795 	 */
796 	do {
797 		bit_status = !(readl_relaxed(clk_elem->reg) & PLL_RDY);
798 
799 		if (bit_status)
800 			udelay(120);
801 
802 	} while (bit_status && --timeout);
803 
804 unlock:
805 	spin_unlock_irqrestore(clk_elem->lock, flags);
806 
807 	return bit_status;
808 }
809 
810 static void pll_disable(struct clk_hw *hw)
811 {
812 	struct stm32_pll_obj *clk_elem = to_pll(hw);
813 	u32 reg;
814 	unsigned long flags = 0;
815 
816 	spin_lock_irqsave(clk_elem->lock, flags);
817 
818 	reg = readl_relaxed(clk_elem->reg);
819 	reg &= ~PLL_ON;
820 	writel_relaxed(reg, clk_elem->reg);
821 
822 	spin_unlock_irqrestore(clk_elem->lock, flags);
823 }
824 
825 static u32 pll_frac_val(struct clk_hw *hw)
826 {
827 	struct stm32_pll_obj *clk_elem = to_pll(hw);
828 	u32 reg, frac = 0;
829 
830 	reg = readl_relaxed(clk_elem->reg + FRAC_OFFSET);
831 	if (reg & FRACLE)
832 		frac = (reg >> FRAC_SHIFT) & FRAC_MASK;
833 
834 	return frac;
835 }
836 
837 static unsigned long pll_recalc_rate(struct clk_hw *hw,
838 				     unsigned long parent_rate)
839 {
840 	struct stm32_pll_obj *clk_elem = to_pll(hw);
841 	u32 reg;
842 	u32 frac, divm, divn;
843 	u64 rate, rate_frac = 0;
844 
845 	reg = readl_relaxed(clk_elem->reg + 4);
846 
847 	divm = ((reg >> DIVM_SHIFT) & DIVM_MASK) + 1;
848 	divn = ((reg >> DIVN_SHIFT) & DIVN_MASK) + 1;
849 	rate = (u64)parent_rate * divn;
850 
851 	do_div(rate, divm);
852 
853 	frac = pll_frac_val(hw);
854 	if (frac) {
855 		rate_frac = (u64)parent_rate * (u64)frac;
856 		do_div(rate_frac, (divm * 8192));
857 	}
858 
859 	return rate + rate_frac;
860 }
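
/*
 * Rate computed above: parent_rate * (DIVN + 1) / (DIVM + 1), plus a
 * fractional part of parent_rate * FRACV / ((DIVM + 1) * 8192) when FRACLE
 * is set. Illustrative figures (assumed, not taken from a real board): with
 * a 24 MHz parent, DIVM = 2 and DIVN = 80 give 24 MHz * 81 / 3 = 648 MHz,
 * and a FRACV of 4096 would add 24 MHz * 4096 / (3 * 8192) = 4 MHz.
 */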
861 
862 static int pll_is_enabled(struct clk_hw *hw)
863 {
864 	struct stm32_pll_obj *clk_elem = to_pll(hw);
865 	unsigned long flags = 0;
866 	int ret;
867 
868 	spin_lock_irqsave(clk_elem->lock, flags);
869 	ret = __pll_is_enabled(hw);
870 	spin_unlock_irqrestore(clk_elem->lock, flags);
871 
872 	return ret;
873 }
874 
875 static u8 pll_get_parent(struct clk_hw *hw)
876 {
877 	struct stm32_pll_obj *clk_elem = to_pll(hw);
878 	struct clk_hw *mux_hw = &clk_elem->mux.hw;
879 
880 	__clk_hw_set_clk(mux_hw, hw);
881 
882 	return clk_mux_ops.get_parent(mux_hw);
883 }
884 
885 static const struct clk_ops pll_ops = {
886 	.enable		= pll_enable,
887 	.disable	= pll_disable,
888 	.recalc_rate	= pll_recalc_rate,
889 	.is_enabled	= pll_is_enabled,
890 	.get_parent	= pll_get_parent,
891 };
892 
893 static struct clk_hw *clk_register_pll(struct device *dev, const char *name,
894 				       const char * const *parent_names,
895 				       int num_parents,
896 				       void __iomem *reg,
897 				       void __iomem *mux_reg,
898 				       unsigned long flags,
899 				       spinlock_t *lock)
900 {
901 	struct stm32_pll_obj *element;
902 	struct clk_init_data init;
903 	struct clk_hw *hw;
904 	int err;
905 
906 	element = devm_kzalloc(dev, sizeof(*element), GFP_KERNEL);
907 	if (!element)
908 		return ERR_PTR(-ENOMEM);
909 
910 	init.name = name;
911 	init.ops = &pll_ops;
912 	init.flags = flags;
913 	init.parent_names = parent_names;
914 	init.num_parents = num_parents;
915 
916 	element->mux.lock = lock;
917 	element->mux.reg = mux_reg;
918 	element->mux.shift = PLL_MUX_SHIFT;
919 	element->mux.mask = PLL_MUX_MASK;
920 	element->mux.flags = CLK_MUX_READ_ONLY;
922 
923 	element->hw.init = &init;
924 	element->reg = reg;
925 	element->lock = lock;
926 
927 	hw = &element->hw;
928 	err = clk_hw_register(dev, hw);
929 
930 	if (err)
931 		return ERR_PTR(err);
932 
933 	return hw;
934 }
935 
936 /* Kernel Timer */
937 struct timer_cker {
938 	/* lock the kernel output divider register */
939 	spinlock_t *lock;
940 	void __iomem *apbdiv;
941 	void __iomem *timpre;
942 	struct clk_hw hw;
943 };
944 
945 #define to_timer_cker(_hw) container_of(_hw, struct timer_cker, hw)
946 
947 #define APB_DIV_MASK 0x07
948 #define TIM_PRE_MASK 0x01
949 
950 static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
951 				unsigned long parent_rate)
952 {
953 	struct timer_cker *tim_ker = to_timer_cker(hw);
954 	u32 prescaler;
955 	unsigned int mult = 0;
956 
957 	prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
958 	if (prescaler < 2)
959 		return 1;
960 
961 	mult = 2;
962 
963 	if (rate / parent_rate >= 4)
964 		mult = 4;
965 
966 	return mult;
967 }
968 
969 static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
970 				 unsigned long *parent_rate)
971 {
972 	unsigned long factor = __bestmult(hw, rate, *parent_rate);
973 
974 	return *parent_rate * factor;
975 }
976 
977 static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
978 			      unsigned long parent_rate)
979 {
980 	struct timer_cker *tim_ker = to_timer_cker(hw);
981 	unsigned long flags = 0;
982 	unsigned long factor = __bestmult(hw, rate, parent_rate);
983 	int ret = 0;
984 
985 	spin_lock_irqsave(tim_ker->lock, flags);
986 
987 	switch (factor) {
988 	case 1:
989 		break;
990 	case 2:
991 		writel_relaxed(0, tim_ker->timpre);
992 		break;
993 	case 4:
994 		writel_relaxed(1, tim_ker->timpre);
995 		break;
996 	default:
997 		ret = -EINVAL;
998 	}
999 	spin_unlock_irqrestore(tim_ker->lock, flags);
1000 
1001 	return ret;
1002 }
1003 
1004 static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
1005 					   unsigned long parent_rate)
1006 {
1007 	struct timer_cker *tim_ker = to_timer_cker(hw);
1008 	u32 prescaler, timpre;
1009 	u32 mul;
1010 
1011 	prescaler = readl_relaxed(tim_ker->apbdiv) & APB_DIV_MASK;
1012 
1013 	timpre = readl_relaxed(tim_ker->timpre) & TIM_PRE_MASK;
1014 
1015 	if (!prescaler)
1016 		return parent_rate;
1017 
1018 	mul = (timpre + 1) * 2;
1019 
1020 	return parent_rate * mul;
1021 }
1022 
1023 static const struct clk_ops timer_ker_ops = {
1024 	.recalc_rate	= timer_ker_recalc_rate,
1025 	.round_rate	= timer_ker_round_rate,
1026 	.set_rate	= timer_ker_set_rate,
1027 
1028 };
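
/*
 * Per timer_ker_recalc_rate() above: when the APBxDIV field is 0 (bus clock
 * undivided) the timer kernel clock equals pclkx; otherwise it is 2 * pclkx,
 * or 4 * pclkx when TIMPRE is set. With an assumed pclk1 of 100 MHz and a
 * non-zero APB1DIV, ck1_tim would therefore read back as 200 MHz
 * (TIMPRE = 0) or 400 MHz (TIMPRE = 1).
 */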
1029 
1030 static struct clk_hw *clk_register_cktim(struct device *dev, const char *name,
1031 					 const char *parent_name,
1032 					 unsigned long flags,
1033 					 void __iomem *apbdiv,
1034 					 void __iomem *timpre,
1035 					 spinlock_t *lock)
1036 {
1037 	struct timer_cker *tim_ker;
1038 	struct clk_init_data init;
1039 	struct clk_hw *hw;
1040 	int err;
1041 
1042 	tim_ker = devm_kzalloc(dev, sizeof(*tim_ker), GFP_KERNEL);
1043 	if (!tim_ker)
1044 		return ERR_PTR(-ENOMEM);
1045 
1046 	init.name = name;
1047 	init.ops = &timer_ker_ops;
1048 	init.flags = flags;
1049 	init.parent_names = &parent_name;
1050 	init.num_parents = 1;
1051 
1052 	tim_ker->hw.init = &init;
1053 	tim_ker->lock = lock;
1054 	tim_ker->apbdiv = apbdiv;
1055 	tim_ker->timpre = timpre;
1056 
1057 	hw = &tim_ker->hw;
1058 	err = clk_hw_register(dev, hw);
1059 
1060 	if (err)
1061 		return ERR_PTR(err);
1062 
1063 	return hw;
1064 }
1065 
1066 /* The RTC clock divider applies only when ck_hse is the selected parent */
1067 #define HSE_RTC 3
1068 
1069 static unsigned long clk_divider_rtc_recalc_rate(struct clk_hw *hw,
1070 						 unsigned long parent_rate)
1071 {
1072 	if (clk_hw_get_parent(hw) == clk_hw_get_parent_by_index(hw, HSE_RTC))
1073 		return clk_divider_ops.recalc_rate(hw, parent_rate);
1074 
1075 	return parent_rate;
1076 }
1077 
1078 static int clk_divider_rtc_set_rate(struct clk_hw *hw, unsigned long rate,
1079 				    unsigned long parent_rate)
1080 {
1081 	if (clk_hw_get_parent(hw) == clk_hw_get_parent_by_index(hw, HSE_RTC))
1082 		return clk_divider_ops.set_rate(hw, rate, parent_rate);
1083 
1084 	return parent_rate;
1085 }
1086 
1087 static int clk_divider_rtc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1088 {
1089 	if (req->best_parent_hw == clk_hw_get_parent_by_index(hw, HSE_RTC))
1090 		return clk_divider_ops.determine_rate(hw, req);
1091 
1092 	req->rate = req->best_parent_rate;
1093 
1094 	return 0;
1095 }
1096 
1097 static const struct clk_ops rtc_div_clk_ops = {
1098 	.recalc_rate	= clk_divider_rtc_recalc_rate,
1099 	.set_rate	= clk_divider_rtc_set_rate,
1100 	.determine_rate = clk_divider_rtc_determine_rate
1101 };
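
/*
 * The RTCDIV divider only acts on the HSE path of the RTC mux (parent index
 * HSE_RTC above); for the other parents (off, ck_lse, ck_lsi) these ops fall
 * through and simply propagate the parent rate unchanged.
 */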
1102 
1103 struct stm32_pll_cfg {
1104 	u32 offset;
1105 	u32 muxoff;
1106 };
1107 
1108 static struct clk_hw *_clk_register_pll(struct device *dev,
1109 					struct clk_hw_onecell_data *clk_data,
1110 					void __iomem *base, spinlock_t *lock,
1111 					const struct clock_config *cfg)
1112 {
1113 	struct stm32_pll_cfg *stm_pll_cfg = cfg->cfg;
1114 
1115 	return clk_register_pll(dev, cfg->name, cfg->parent_names,
1116 				cfg->num_parents,
1117 				base + stm_pll_cfg->offset,
1118 				base + stm_pll_cfg->muxoff,
1119 				cfg->flags, lock);
1120 }
1121 
1122 struct stm32_cktim_cfg {
1123 	u32 offset_apbdiv;
1124 	u32 offset_timpre;
1125 };
1126 
1127 static struct clk_hw *_clk_register_cktim(struct device *dev,
1128 					  struct clk_hw_onecell_data *clk_data,
1129 					  void __iomem *base, spinlock_t *lock,
1130 					  const struct clock_config *cfg)
1131 {
1132 	struct stm32_cktim_cfg *cktim_cfg = cfg->cfg;
1133 
1134 	return clk_register_cktim(dev, cfg->name, cfg->parent_name, cfg->flags,
1135 				  cktim_cfg->offset_apbdiv + base,
1136 				  cktim_cfg->offset_timpre + base, lock);
1137 }
1138 
1139 static struct clk_hw *
1140 _clk_stm32_register_gate(struct device *dev,
1141 			 struct clk_hw_onecell_data *clk_data,
1142 			 void __iomem *base, spinlock_t *lock,
1143 			 const struct clock_config *cfg)
1144 {
1145 	return clk_stm32_register_gate_ops(dev,
1146 				    cfg->name,
1147 				    cfg->parent_name,
1148 				    cfg->parent_data,
1149 				    cfg->flags,
1150 				    base,
1151 				    cfg->cfg,
1152 				    lock);
1153 }
1154 
1155 static struct clk_hw *
1156 _clk_stm32_register_composite(struct device *dev,
1157 			      struct clk_hw_onecell_data *clk_data,
1158 			      void __iomem *base, spinlock_t *lock,
1159 			      const struct clock_config *cfg)
1160 {
1161 	return clk_stm32_register_composite(dev, cfg->name, cfg->parent_names,
1162 					    cfg->parent_data, cfg->num_parents,
1163 					    base, cfg->cfg, cfg->flags, lock);
1164 }
1165 
1166 #define GATE(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
1167 {\
1168 	.id		= _id,\
1169 	.name		= _name,\
1170 	.parent_name	= _parent,\
1171 	.flags		= _flags,\
1172 	.cfg		=  &(struct gate_cfg) {\
1173 		.reg_off	= _offset,\
1174 		.bit_idx	= _bit_idx,\
1175 		.gate_flags	= _gate_flags,\
1176 	},\
1177 	.func		= _clk_hw_register_gate,\
1178 }
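
/*
 * Illustrative expansion (hypothetical entry, not taken from the table
 * below): GATE(CK_FOO, "ck_foo", "ck_axi", 0, RCC_AHB6ENSETR, 31, 0) would
 * yield a clock_config whose cfg points at a gate_cfg { RCC_AHB6ENSETR, 31,
 * 0 } and whose func is _clk_hw_register_gate(), i.e. a plain clk_gate on
 * that register bit.
 */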
1179 
1180 #define FIXED_FACTOR(_id, _name, _parent, _flags, _mult, _div)\
1181 {\
1182 	.id		= _id,\
1183 	.name		= _name,\
1184 	.parent_name	= _parent,\
1185 	.flags		= _flags,\
1186 	.cfg		=  &(struct fixed_factor_cfg) {\
1187 		.mult = _mult,\
1188 		.div = _div,\
1189 	},\
1190 	.func		= _clk_hw_register_fixed_factor,\
1191 }
1192 
1193 #define DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
1194 		  _div_flags, _div_table)\
1195 {\
1196 	.id		= _id,\
1197 	.name		= _name,\
1198 	.parent_name	= _parent,\
1199 	.flags		= _flags,\
1200 	.cfg		=  &(struct div_cfg) {\
1201 		.reg_off	= _offset,\
1202 		.shift		= _shift,\
1203 		.width		= _width,\
1204 		.div_flags	= _div_flags,\
1205 		.table		= _div_table,\
1206 	},\
1207 	.func		= _clk_hw_register_divider_table,\
1208 }
1209 
1210 #define DIV(_id, _name, _parent, _flags, _offset, _shift, _width, _div_flags)\
1211 	DIV_TABLE(_id, _name, _parent, _flags, _offset, _shift, _width,\
1212 		  _div_flags, NULL)
1213 
1214 #define MUX(_id, _name, _parents, _flags, _offset, _shift, _width, _mux_flags)\
1215 {\
1216 	.id		= _id,\
1217 	.name		= _name,\
1218 	.parent_names	= _parents,\
1219 	.num_parents	= ARRAY_SIZE(_parents),\
1220 	.flags		= _flags,\
1221 	.cfg		=  &(struct mux_cfg) {\
1222 		.reg_off	= _offset,\
1223 		.shift		= _shift,\
1224 		.width		= _width,\
1225 		.mux_flags	= _mux_flags,\
1226 	},\
1227 	.func		= _clk_hw_register_mux,\
1228 }
1229 
1230 #define PLL(_id, _name, _parents, _flags, _offset_p, _offset_mux)\
1231 {\
1232 	.id		= _id,\
1233 	.name		= _name,\
1234 	.parent_names	= _parents,\
1235 	.num_parents	= ARRAY_SIZE(_parents),\
1236 	.flags		= CLK_IGNORE_UNUSED | (_flags),\
1237 	.cfg		=  &(struct stm32_pll_cfg) {\
1238 		.offset = _offset_p,\
1239 		.muxoff = _offset_mux,\
1240 	},\
1241 	.func		= _clk_register_pll,\
1242 }
1243 
1244 #define STM32_CKTIM(_name, _parent, _flags, _offset_apbdiv, _offset_timpre)\
1245 {\
1246 	.id		= NO_ID,\
1247 	.name		= _name,\
1248 	.parent_name	= _parent,\
1249 	.flags		= _flags,\
1250 	.cfg		=  &(struct stm32_cktim_cfg) {\
1251 		.offset_apbdiv = _offset_apbdiv,\
1252 		.offset_timpre = _offset_timpre,\
1253 	},\
1254 	.func		= _clk_register_cktim,\
1255 }
1256 
1257 #define STM32_TIM(_id, _name, _parent, _offset_set, _bit_idx)\
1258 		  GATE_MP1(_id, _name, _parent, CLK_SET_RATE_PARENT,\
1259 			   _offset_set, _bit_idx, 0)
1260 
1261 /* STM32 GATE */
1262 #define STM32_GATE(_id, _name, _parent, _flags, _gate)\
1263 {\
1264 	.id		= _id,\
1265 	.name		= _name,\
1266 	.parent_name	= _parent,\
1267 	.flags		= _flags,\
1268 	.cfg		= (struct stm32_gate_cfg *) {_gate},\
1269 	.func		= _clk_stm32_register_gate,\
1270 }
1271 
1272 #define STM32_GATE_PDATA(_id, _name, _parent, _flags, _gate)\
1273 {\
1274 	.id		= _id,\
1275 	.name		= _name,\
1276 	.parent_data	= _parent,\
1277 	.flags		= _flags,\
1278 	.cfg		= (struct stm32_gate_cfg *) {_gate},\
1279 	.func		= _clk_stm32_register_gate,\
1280 }
1281 
1282 #define _STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags, _mgate, _ops)\
1283 	(&(struct stm32_gate_cfg) {\
1284 		&(struct gate_cfg) {\
1285 			.reg_off	= _gate_offset,\
1286 			.bit_idx	= _gate_bit_idx,\
1287 			.gate_flags	= _gate_flags,\
1288 		},\
1289 		.mgate		= _mgate,\
1290 		.ops		= _ops,\
1291 	})
1292 
1293 #define _STM32_MGATE(_mgate)\
1294 	(&per_gate_cfg[_mgate])
1295 
1296 #define _GATE(_gate_offset, _gate_bit_idx, _gate_flags)\
1297 	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
1298 		    NULL, NULL)\
1299 
1300 #define _GATE_MP1(_gate_offset, _gate_bit_idx, _gate_flags)\
1301 	_STM32_GATE(_gate_offset, _gate_bit_idx, _gate_flags,\
1302 		    NULL, &mp1_gate_clk_ops)\
1303 
1304 #define _MGATE_MP1(_mgate)\
1305 	.gate = &per_gate_cfg[_mgate]
1306 
1307 #define GATE_MP1(_id, _name, _parent, _flags, _offset, _bit_idx, _gate_flags)\
1308 	STM32_GATE(_id, _name, _parent, _flags,\
1309 		   _GATE_MP1(_offset, _bit_idx, _gate_flags))
1310 
1311 #define MGATE_MP1(_id, _name, _parent, _flags, _mgate)\
1312 	STM32_GATE(_id, _name, _parent, _flags,\
1313 		   _STM32_MGATE(_mgate))
1314 
1315 #define MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)\
1316 	STM32_GATE_PDATA(_id, _name, _parent, _flags,\
1317 		   _STM32_MGATE(_mgate))
1318 
1319 #define _STM32_DIV(_div_offset, _div_shift, _div_width,\
1320 		   _div_flags, _div_table, _ops)\
1321 	.div = &(struct stm32_div_cfg) {\
1322 		&(struct div_cfg) {\
1323 			.reg_off	= _div_offset,\
1324 			.shift		= _div_shift,\
1325 			.width		= _div_width,\
1326 			.div_flags	= _div_flags,\
1327 			.table		= _div_table,\
1328 		},\
1329 		.ops		= _ops,\
1330 	}
1331 
1332 #define _DIV(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
1333 	_STM32_DIV(_div_offset, _div_shift, _div_width,\
1334 		   _div_flags, _div_table, NULL)\
1335 
1336 #define _DIV_RTC(_div_offset, _div_shift, _div_width, _div_flags, _div_table)\
1337 	_STM32_DIV(_div_offset, _div_shift, _div_width,\
1338 		   _div_flags, _div_table, &rtc_div_clk_ops)
1339 
1340 #define _STM32_MUX(_offset, _shift, _width, _mux_flags, _mmux, _ops)\
1341 	.mux = &(struct stm32_mux_cfg) {\
1342 		&(struct mux_cfg) {\
1343 			.reg_off	= _offset,\
1344 			.shift		= _shift,\
1345 			.width		= _width,\
1346 			.mux_flags	= _mux_flags,\
1347 			.table		= NULL,\
1348 		},\
1349 		.mmux		= _mmux,\
1350 		.ops		= _ops,\
1351 	}
1352 
1353 #define _MUX(_offset, _shift, _width, _mux_flags)\
1354 	_STM32_MUX(_offset, _shift, _width, _mux_flags, NULL, NULL)\
1355 
1356 #define _MMUX(_mmux) .mux = &ker_mux_cfg[_mmux]
1357 
1358 #define PARENT(_parent) ((const char *[]) { _parent})
1359 
1360 #define _NO_MUX .mux = NULL
1361 #define _NO_DIV .div = NULL
1362 #define _NO_GATE .gate = NULL
1363 
1364 #define COMPOSITE(_id, _name, _parents, _flags, _gate, _mux, _div)\
1365 {\
1366 	.id		= _id,\
1367 	.name		= _name,\
1368 	.parent_names	= _parents,\
1369 	.num_parents	= ARRAY_SIZE(_parents),\
1370 	.flags		= _flags,\
1371 	.cfg		= &(struct stm32_composite_cfg) {\
1372 		_gate,\
1373 		_mux,\
1374 		_div,\
1375 	},\
1376 	.func		= _clk_stm32_register_composite,\
1377 }
1378 
1379 #define PCLK(_id, _name, _parent, _flags, _mgate)\
1380 	MGATE_MP1(_id, _name, _parent, _flags, _mgate)
1381 
1382 #define PCLK_PDATA(_id, _name, _parent, _flags, _mgate)\
1383 	MGATE_MP1_PDATA(_id, _name, _parent, _flags, _mgate)
1384 
1385 #define KCLK(_id, _name, _parents, _flags, _mgate, _mmux)\
1386 	     COMPOSITE(_id, _name, _parents, CLK_OPS_PARENT_ENABLE |\
1387 		       CLK_SET_RATE_NO_REPARENT | _flags,\
1388 		       _MGATE_MP1(_mgate),\
1389 		       _MMUX(_mmux),\
1390 		       _NO_DIV)
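
/*
 * Naming convention used below: PCLK() is a peripheral bus-interface gate
 * (typically fed by a pclkx bus clock), while KCLK() is a peripheral kernel
 * clock built as a composite of a shared multi-gate and a kernel multi-mux,
 * registered with CLK_OPS_PARENT_ENABLE and CLK_SET_RATE_NO_REPARENT.
 */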
1391 
1392 enum {
1393 	G_SAI1,
1394 	G_SAI2,
1395 	G_SAI3,
1396 	G_SAI4,
1397 	G_SPI1,
1398 	G_SPI2,
1399 	G_SPI3,
1400 	G_SPI4,
1401 	G_SPI5,
1402 	G_SPI6,
1403 	G_SPDIF,
1404 	G_I2C1,
1405 	G_I2C2,
1406 	G_I2C3,
1407 	G_I2C4,
1408 	G_I2C5,
1409 	G_I2C6,
1410 	G_USART2,
1411 	G_UART4,
1412 	G_USART3,
1413 	G_UART5,
1414 	G_USART1,
1415 	G_USART6,
1416 	G_UART7,
1417 	G_UART8,
1418 	G_LPTIM1,
1419 	G_LPTIM2,
1420 	G_LPTIM3,
1421 	G_LPTIM4,
1422 	G_LPTIM5,
1423 	G_LTDC,
1424 	G_DSI,
1425 	G_QSPI,
1426 	G_FMC,
1427 	G_SDMMC1,
1428 	G_SDMMC2,
1429 	G_SDMMC3,
1430 	G_USBO,
1431 	G_USBPHY,
1432 	G_RNG1,
1433 	G_RNG2,
1434 	G_FDCAN,
1435 	G_DAC12,
1436 	G_CEC,
1437 	G_ADC12,
1438 	G_GPU,
1439 	G_STGEN,
1440 	G_DFSDM,
1441 	G_ADFSDM,
1442 	G_TIM2,
1443 	G_TIM3,
1444 	G_TIM4,
1445 	G_TIM5,
1446 	G_TIM6,
1447 	G_TIM7,
1448 	G_TIM12,
1449 	G_TIM13,
1450 	G_TIM14,
1451 	G_MDIO,
1452 	G_TIM1,
1453 	G_TIM8,
1454 	G_TIM15,
1455 	G_TIM16,
1456 	G_TIM17,
1457 	G_SYSCFG,
1458 	G_VREF,
1459 	G_TMPSENS,
1460 	G_PMBCTRL,
1461 	G_HDP,
1462 	G_IWDG2,
1463 	G_STGENRO,
1464 	G_DMA1,
1465 	G_DMA2,
1466 	G_DMAMUX,
1467 	G_DCMI,
1468 	G_CRYP2,
1469 	G_HASH2,
1470 	G_CRC2,
1471 	G_HSEM,
1472 	G_IPCC,
1473 	G_GPIOA,
1474 	G_GPIOB,
1475 	G_GPIOC,
1476 	G_GPIOD,
1477 	G_GPIOE,
1478 	G_GPIOF,
1479 	G_GPIOG,
1480 	G_GPIOH,
1481 	G_GPIOI,
1482 	G_GPIOJ,
1483 	G_GPIOK,
1484 	G_MDMA,
1485 	G_ETHCK,
1486 	G_ETHTX,
1487 	G_ETHRX,
1488 	G_ETHMAC,
1489 	G_CRC1,
1490 	G_USBH,
1491 	G_ETHSTP,
1492 	G_RTCAPB,
1493 	G_TZC1,
1494 	G_TZC2,
1495 	G_TZPC,
1496 	G_IWDG1,
1497 	G_BSEC,
1498 	G_GPIOZ,
1499 	G_CRYP1,
1500 	G_HASH1,
1501 	G_BKPSRAM,
1502 	G_DDRPERFM,
1503 
1504 	G_LAST
1505 };
1506 
1507 static struct stm32_mgate mp1_mgate[G_LAST];
1508 
1509 #define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
1510 	       _mgate, _ops)\
1511 	[_id] = {\
1512 		&(struct gate_cfg) {\
1513 			.reg_off	= _gate_offset,\
1514 			.bit_idx	= _gate_bit_idx,\
1515 			.gate_flags	= _gate_flags,\
1516 		},\
1517 		.mgate		= _mgate,\
1518 		.ops		= _ops,\
1519 	}
1520 
1521 #define K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
1522 	_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
1523 	       NULL, &mp1_gate_clk_ops)
1524 
1525 #define K_MGATE(_id, _gate_offset, _gate_bit_idx, _gate_flags)\
1526 	_K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
1527 	       &mp1_mgate[_id], &mp1_mgate_clk_ops)
1528 
1529 /* Peripheral gates */
1530 static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
1531 	/* Multi gates */
1532 	K_GATE(G_MDIO,		RCC_APB1ENSETR, 31, 0),
1533 	K_MGATE(G_DAC12,	RCC_APB1ENSETR, 29, 0),
1534 	K_MGATE(G_CEC,		RCC_APB1ENSETR, 27, 0),
1535 	K_MGATE(G_SPDIF,	RCC_APB1ENSETR, 26, 0),
1536 	K_MGATE(G_I2C5,		RCC_APB1ENSETR, 24, 0),
1537 	K_MGATE(G_I2C3,		RCC_APB1ENSETR, 23, 0),
1538 	K_MGATE(G_I2C2,		RCC_APB1ENSETR, 22, 0),
1539 	K_MGATE(G_I2C1,		RCC_APB1ENSETR, 21, 0),
1540 	K_MGATE(G_UART8,	RCC_APB1ENSETR, 19, 0),
1541 	K_MGATE(G_UART7,	RCC_APB1ENSETR, 18, 0),
1542 	K_MGATE(G_UART5,	RCC_APB1ENSETR, 17, 0),
1543 	K_MGATE(G_UART4,	RCC_APB1ENSETR, 16, 0),
1544 	K_MGATE(G_USART3,	RCC_APB1ENSETR, 15, 0),
1545 	K_MGATE(G_USART2,	RCC_APB1ENSETR, 14, 0),
1546 	K_MGATE(G_SPI3,		RCC_APB1ENSETR, 12, 0),
1547 	K_MGATE(G_SPI2,		RCC_APB1ENSETR, 11, 0),
1548 	K_MGATE(G_LPTIM1,	RCC_APB1ENSETR, 9, 0),
1549 	K_GATE(G_TIM14,		RCC_APB1ENSETR, 8, 0),
1550 	K_GATE(G_TIM13,		RCC_APB1ENSETR, 7, 0),
1551 	K_GATE(G_TIM12,		RCC_APB1ENSETR, 6, 0),
1552 	K_GATE(G_TIM7,		RCC_APB1ENSETR, 5, 0),
1553 	K_GATE(G_TIM6,		RCC_APB1ENSETR, 4, 0),
1554 	K_GATE(G_TIM5,		RCC_APB1ENSETR, 3, 0),
1555 	K_GATE(G_TIM4,		RCC_APB1ENSETR, 2, 0),
1556 	K_GATE(G_TIM3,		RCC_APB1ENSETR, 1, 0),
1557 	K_GATE(G_TIM2,		RCC_APB1ENSETR, 0, 0),
1558 
1559 	K_MGATE(G_FDCAN,	RCC_APB2ENSETR, 24, 0),
1560 	K_GATE(G_ADFSDM,	RCC_APB2ENSETR, 21, 0),
1561 	K_GATE(G_DFSDM,		RCC_APB2ENSETR, 20, 0),
1562 	K_MGATE(G_SAI3,		RCC_APB2ENSETR, 18, 0),
1563 	K_MGATE(G_SAI2,		RCC_APB2ENSETR, 17, 0),
1564 	K_MGATE(G_SAI1,		RCC_APB2ENSETR, 16, 0),
1565 	K_MGATE(G_USART6,	RCC_APB2ENSETR, 13, 0),
1566 	K_MGATE(G_SPI5,		RCC_APB2ENSETR, 10, 0),
1567 	K_MGATE(G_SPI4,		RCC_APB2ENSETR, 9, 0),
1568 	K_MGATE(G_SPI1,		RCC_APB2ENSETR, 8, 0),
1569 	K_GATE(G_TIM17,		RCC_APB2ENSETR, 4, 0),
1570 	K_GATE(G_TIM16,		RCC_APB2ENSETR, 3, 0),
1571 	K_GATE(G_TIM15,		RCC_APB2ENSETR, 2, 0),
1572 	K_GATE(G_TIM8,		RCC_APB2ENSETR, 1, 0),
1573 	K_GATE(G_TIM1,		RCC_APB2ENSETR, 0, 0),
1574 
1575 	K_GATE(G_HDP,		RCC_APB3ENSETR, 20, 0),
1576 	K_GATE(G_PMBCTRL,	RCC_APB3ENSETR, 17, 0),
1577 	K_GATE(G_TMPSENS,	RCC_APB3ENSETR, 16, 0),
1578 	K_GATE(G_VREF,		RCC_APB3ENSETR, 13, 0),
1579 	K_GATE(G_SYSCFG,	RCC_APB3ENSETR, 11, 0),
1580 	K_MGATE(G_SAI4,		RCC_APB3ENSETR, 8, 0),
1581 	K_MGATE(G_LPTIM5,	RCC_APB3ENSETR, 3, 0),
1582 	K_MGATE(G_LPTIM4,	RCC_APB3ENSETR, 2, 0),
1583 	K_MGATE(G_LPTIM3,	RCC_APB3ENSETR, 1, 0),
1584 	K_MGATE(G_LPTIM2,	RCC_APB3ENSETR, 0, 0),
1585 
1586 	K_GATE(G_STGENRO,	RCC_APB4ENSETR, 20, 0),
1587 	K_MGATE(G_USBPHY,	RCC_APB4ENSETR, 16, 0),
1588 	K_GATE(G_IWDG2,		RCC_APB4ENSETR, 15, 0),
1589 	K_GATE(G_DDRPERFM,	RCC_APB4ENSETR, 8, 0),
1590 	K_MGATE(G_DSI,		RCC_APB4ENSETR, 4, 0),
1591 	K_MGATE(G_LTDC,		RCC_APB4ENSETR, 0, 0),
1592 
1593 	K_GATE(G_STGEN,		RCC_APB5ENSETR, 20, 0),
1594 	K_GATE(G_BSEC,		RCC_APB5ENSETR, 16, 0),
1595 	K_GATE(G_IWDG1,		RCC_APB5ENSETR, 15, 0),
1596 	K_GATE(G_TZPC,		RCC_APB5ENSETR, 13, 0),
1597 	K_GATE(G_TZC2,		RCC_APB5ENSETR, 12, 0),
1598 	K_GATE(G_TZC1,		RCC_APB5ENSETR, 11, 0),
1599 	K_GATE(G_RTCAPB,	RCC_APB5ENSETR, 8, 0),
1600 	K_MGATE(G_USART1,	RCC_APB5ENSETR, 4, 0),
1601 	K_MGATE(G_I2C6,		RCC_APB5ENSETR, 3, 0),
1602 	K_MGATE(G_I2C4,		RCC_APB5ENSETR, 2, 0),
1603 	K_MGATE(G_SPI6,		RCC_APB5ENSETR, 0, 0),
1604 
1605 	K_MGATE(G_SDMMC3,	RCC_AHB2ENSETR, 16, 0),
1606 	K_MGATE(G_USBO,		RCC_AHB2ENSETR, 8, 0),
1607 	K_MGATE(G_ADC12,	RCC_AHB2ENSETR, 5, 0),
1608 	K_GATE(G_DMAMUX,	RCC_AHB2ENSETR, 2, 0),
1609 	K_GATE(G_DMA2,		RCC_AHB2ENSETR, 1, 0),
1610 	K_GATE(G_DMA1,		RCC_AHB2ENSETR, 0, 0),
1611 
1612 	K_GATE(G_IPCC,		RCC_AHB3ENSETR, 12, 0),
1613 	K_GATE(G_HSEM,		RCC_AHB3ENSETR, 11, 0),
1614 	K_GATE(G_CRC2,		RCC_AHB3ENSETR, 7, 0),
1615 	K_MGATE(G_RNG2,		RCC_AHB3ENSETR, 6, 0),
1616 	K_GATE(G_HASH2,		RCC_AHB3ENSETR, 5, 0),
1617 	K_GATE(G_CRYP2,		RCC_AHB3ENSETR, 4, 0),
1618 	K_GATE(G_DCMI,		RCC_AHB3ENSETR, 0, 0),
1619 
1620 	K_GATE(G_GPIOK,		RCC_AHB4ENSETR, 10, 0),
1621 	K_GATE(G_GPIOJ,		RCC_AHB4ENSETR, 9, 0),
1622 	K_GATE(G_GPIOI,		RCC_AHB4ENSETR, 8, 0),
1623 	K_GATE(G_GPIOH,		RCC_AHB4ENSETR, 7, 0),
1624 	K_GATE(G_GPIOG,		RCC_AHB4ENSETR, 6, 0),
1625 	K_GATE(G_GPIOF,		RCC_AHB4ENSETR, 5, 0),
1626 	K_GATE(G_GPIOE,		RCC_AHB4ENSETR, 4, 0),
1627 	K_GATE(G_GPIOD,		RCC_AHB4ENSETR, 3, 0),
1628 	K_GATE(G_GPIOC,		RCC_AHB4ENSETR, 2, 0),
1629 	K_GATE(G_GPIOB,		RCC_AHB4ENSETR, 1, 0),
1630 	K_GATE(G_GPIOA,		RCC_AHB4ENSETR, 0, 0),
1631 
1632 	K_GATE(G_BKPSRAM,	RCC_AHB5ENSETR, 8, 0),
1633 	K_MGATE(G_RNG1,		RCC_AHB5ENSETR, 6, 0),
1634 	K_GATE(G_HASH1,		RCC_AHB5ENSETR, 5, 0),
1635 	K_GATE(G_CRYP1,		RCC_AHB5ENSETR, 4, 0),
1636 	K_GATE(G_GPIOZ,		RCC_AHB5ENSETR, 0, 0),
1637 
1638 	K_GATE(G_USBH,		RCC_AHB6ENSETR, 24, 0),
1639 	K_GATE(G_CRC1,		RCC_AHB6ENSETR, 20, 0),
1640 	K_MGATE(G_SDMMC2,	RCC_AHB6ENSETR, 17, 0),
1641 	K_MGATE(G_SDMMC1,	RCC_AHB6ENSETR, 16, 0),
1642 	K_MGATE(G_QSPI,		RCC_AHB6ENSETR, 14, 0),
1643 	K_MGATE(G_FMC,		RCC_AHB6ENSETR, 12, 0),
1644 	K_GATE(G_ETHMAC,	RCC_AHB6ENSETR, 10, 0),
1645 	K_GATE(G_ETHRX,		RCC_AHB6ENSETR, 9, 0),
1646 	K_GATE(G_ETHTX,		RCC_AHB6ENSETR, 8, 0),
1647 	K_GATE(G_ETHCK,		RCC_AHB6ENSETR, 7, 0),
1648 	K_MGATE(G_GPU,		RCC_AHB6ENSETR, 5, 0),
1649 	K_GATE(G_MDMA,		RCC_AHB6ENSETR, 0, 0),
1650 	K_GATE(G_ETHSTP,	RCC_AHB6LPENSETR, 11, 0),
1651 };
1652 
1653 enum {
1654 	M_SDMMC12,
1655 	M_SDMMC3,
1656 	M_FMC,
1657 	M_QSPI,
1658 	M_RNG1,
1659 	M_RNG2,
1660 	M_USBPHY,
1661 	M_USBO,
1662 	M_STGEN,
1663 	M_SPDIF,
1664 	M_SPI1,
1665 	M_SPI23,
1666 	M_SPI45,
1667 	M_SPI6,
1668 	M_CEC,
1669 	M_I2C12,
1670 	M_I2C35,
1671 	M_I2C46,
1672 	M_LPTIM1,
1673 	M_LPTIM23,
1674 	M_LPTIM45,
1675 	M_USART1,
1676 	M_UART24,
1677 	M_UART35,
1678 	M_USART6,
1679 	M_UART78,
1680 	M_SAI1,
1681 	M_SAI2,
1682 	M_SAI3,
1683 	M_SAI4,
1684 	M_DSI,
1685 	M_FDCAN,
1686 	M_ADC12,
1687 	M_ETHCK,
1688 	M_CKPER,
1689 	M_LAST
1690 };
1691 
1692 static struct stm32_mmux ker_mux[M_LAST];
1693 
1694 #define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
1695 	[_id] = {\
1696 		&(struct mux_cfg) {\
1697 			.reg_off	= _offset,\
1698 			.shift		= _shift,\
1699 			.width		= _width,\
1700 			.mux_flags	= _mux_flags,\
1701 			.table		= NULL,\
1702 		},\
1703 		.mmux		= _mmux,\
1704 		.ops		= _ops,\
1705 	}
1706 
1707 #define K_MUX(_id, _offset, _shift, _width, _mux_flags)\
1708 	_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
1709 			NULL, NULL)
1710 
1711 #define K_MMUX(_id, _offset, _shift, _width, _mux_flags)\
1712 	_K_MUX(_id, _offset, _shift, _width, _mux_flags,\
1713 			&ker_mux[_id], &clk_mmux_ops)
1714 
1715 static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
1716 	/* Kernel multi mux */
1717 	K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
1718 	K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
1719 	K_MMUX(M_SPI45, RCC_SPI2S45CKSELR, 0, 3, 0),
1720 	K_MMUX(M_I2C12, RCC_I2C12CKSELR, 0, 3, 0),
1721 	K_MMUX(M_I2C35, RCC_I2C35CKSELR, 0, 3, 0),
1722 	K_MMUX(M_LPTIM23, RCC_LPTIM23CKSELR, 0, 3, 0),
1723 	K_MMUX(M_LPTIM45, RCC_LPTIM45CKSELR, 0, 3, 0),
1724 	K_MMUX(M_UART24, RCC_UART24CKSELR, 0, 3, 0),
1725 	K_MMUX(M_UART35, RCC_UART35CKSELR, 0, 3, 0),
1726 	K_MMUX(M_UART78, RCC_UART78CKSELR, 0, 3, 0),
1727 	K_MMUX(M_SAI1, RCC_SAI1CKSELR, 0, 3, 0),
1728 	K_MMUX(M_ETHCK, RCC_ETHCKSELR, 0, 2, 0),
1729 	K_MMUX(M_I2C46, RCC_I2C46CKSELR, 0, 3, 0),
1730 
1731 	/*  Kernel simple mux */
1732 	K_MUX(M_RNG2, RCC_RNG2CKSELR, 0, 2, 0),
1733 	K_MUX(M_SDMMC3, RCC_SDMMC3CKSELR, 0, 3, 0),
1734 	K_MUX(M_FMC, RCC_FMCCKSELR, 0, 2, 0),
1735 	K_MUX(M_QSPI, RCC_QSPICKSELR, 0, 2, 0),
1736 	K_MUX(M_USBPHY, RCC_USBCKSELR, 0, 2, 0),
1737 	K_MUX(M_USBO, RCC_USBCKSELR, 4, 1, 0),
1738 	K_MUX(M_SPDIF, RCC_SPDIFCKSELR, 0, 2, 0),
1739 	K_MUX(M_SPI1, RCC_SPI2S1CKSELR, 0, 3, 0),
1740 	K_MUX(M_CEC, RCC_CECCKSELR, 0, 2, 0),
1741 	K_MUX(M_LPTIM1, RCC_LPTIM1CKSELR, 0, 3, 0),
1742 	K_MUX(M_USART6, RCC_UART6CKSELR, 0, 3, 0),
1743 	K_MUX(M_FDCAN, RCC_FDCANCKSELR, 0, 2, 0),
1744 	K_MUX(M_SAI2, RCC_SAI2CKSELR, 0, 3, 0),
1745 	K_MUX(M_SAI3, RCC_SAI3CKSELR, 0, 3, 0),
1746 	K_MUX(M_SAI4, RCC_SAI4CKSELR, 0, 3, 0),
1747 	K_MUX(M_ADC12, RCC_ADCCKSELR, 0, 2, 0),
1748 	K_MUX(M_DSI, RCC_DSICKSELR, 0, 1, 0),
1749 	K_MUX(M_CKPER, RCC_CPERCKSELR, 0, 2, 0),
1750 	K_MUX(M_RNG1, RCC_RNG1CKSELR, 0, 2, 0),
1751 	K_MUX(M_STGEN, RCC_STGENCKSELR, 0, 2, 0),
1752 	K_MUX(M_USART1, RCC_UART1CKSELR, 0, 3, 0),
1753 	K_MUX(M_SPI6, RCC_SPI6CKSELR, 0, 3, 0),
1754 };
1755 
1756 static const struct clock_config stm32mp1_clock_cfg[] = {
1757 	/*  External / Internal Oscillators */
1758 	GATE_MP1(CK_HSE, "ck_hse", "clk-hse", 0, RCC_OCENSETR, 8, 0),
1759 	/* ck_csi is used by IO compensation and should be critical */
1760 	GATE_MP1(CK_CSI, "ck_csi", "clk-csi", CLK_IS_CRITICAL,
1761 		 RCC_OCENSETR, 4, 0),
1762 	COMPOSITE(CK_HSI, "ck_hsi", PARENT("clk-hsi"), 0,
1763 		  _GATE_MP1(RCC_OCENSETR, 0, 0),
1764 		  _NO_MUX,
1765 		  _DIV(RCC_HSICFGR, 0, 2, CLK_DIVIDER_POWER_OF_TWO |
1766 		       CLK_DIVIDER_READ_ONLY, NULL)),
1767 	GATE(CK_LSI, "ck_lsi", "clk-lsi", 0, RCC_RDLSICR, 0, 0),
1768 	GATE(CK_LSE, "ck_lse", "clk-lse", 0, RCC_BDCR, 0, 0),
1769 
1770 	FIXED_FACTOR(CK_HSE_DIV2, "clk-hse-div2", "ck_hse", 0, 1, 2),
1771 
1772 	/* PLLs */
1773 	PLL(PLL1, "pll1", ref12_parents, 0, RCC_PLL1CR, RCC_RCK12SELR),
1774 	PLL(PLL2, "pll2", ref12_parents, 0, RCC_PLL2CR, RCC_RCK12SELR),
1775 	PLL(PLL3, "pll3", ref3_parents, 0, RCC_PLL3CR, RCC_RCK3SELR),
1776 	PLL(PLL4, "pll4", ref4_parents, 0, RCC_PLL4CR, RCC_RCK4SELR),
1777 
1778 	/* ODF */
1779 	COMPOSITE(PLL1_P, "pll1_p", PARENT("pll1"), 0,
1780 		  _GATE(RCC_PLL1CR, 4, 0),
1781 		  _NO_MUX,
1782 		  _DIV(RCC_PLL1CFGR2, 0, 7, 0, NULL)),
1783 
1784 	COMPOSITE(PLL2_P, "pll2_p", PARENT("pll2"), 0,
1785 		  _GATE(RCC_PLL2CR, 4, 0),
1786 		  _NO_MUX,
1787 		  _DIV(RCC_PLL2CFGR2, 0, 7, 0, NULL)),
1788 
1789 	COMPOSITE(PLL2_Q, "pll2_q", PARENT("pll2"), 0,
1790 		  _GATE(RCC_PLL2CR, 5, 0),
1791 		  _NO_MUX,
1792 		  _DIV(RCC_PLL2CFGR2, 8, 7, 0, NULL)),
1793 
1794 	COMPOSITE(PLL2_R, "pll2_r", PARENT("pll2"), CLK_IS_CRITICAL,
1795 		  _GATE(RCC_PLL2CR, 6, 0),
1796 		  _NO_MUX,
1797 		  _DIV(RCC_PLL2CFGR2, 16, 7, 0, NULL)),
1798 
1799 	COMPOSITE(PLL3_P, "pll3_p", PARENT("pll3"), 0,
1800 		  _GATE(RCC_PLL3CR, 4, 0),
1801 		  _NO_MUX,
1802 		  _DIV(RCC_PLL3CFGR2, 0, 7, 0, NULL)),
1803 
1804 	COMPOSITE(PLL3_Q, "pll3_q", PARENT("pll3"), 0,
1805 		  _GATE(RCC_PLL3CR, 5, 0),
1806 		  _NO_MUX,
1807 		  _DIV(RCC_PLL3CFGR2, 8, 7, 0, NULL)),
1808 
1809 	COMPOSITE(PLL3_R, "pll3_r", PARENT("pll3"), 0,
1810 		  _GATE(RCC_PLL3CR, 6, 0),
1811 		  _NO_MUX,
1812 		  _DIV(RCC_PLL3CFGR2, 16, 7, 0, NULL)),
1813 
1814 	COMPOSITE(PLL4_P, "pll4_p", PARENT("pll4"), 0,
1815 		  _GATE(RCC_PLL4CR, 4, 0),
1816 		  _NO_MUX,
1817 		  _DIV(RCC_PLL4CFGR2, 0, 7, 0, NULL)),
1818 
1819 	COMPOSITE(PLL4_Q, "pll4_q", PARENT("pll4"), 0,
1820 		  _GATE(RCC_PLL4CR, 5, 0),
1821 		  _NO_MUX,
1822 		  _DIV(RCC_PLL4CFGR2, 8, 7, 0, NULL)),
1823 
1824 	COMPOSITE(PLL4_R, "pll4_r", PARENT("pll4"), 0,
1825 		  _GATE(RCC_PLL4CR, 6, 0),
1826 		  _NO_MUX,
1827 		  _DIV(RCC_PLL4CFGR2, 16, 7, 0, NULL)),
1828 
1829 	/* MUX system clocks */
1830 	MUX(CK_PER, "ck_per", per_src, CLK_OPS_PARENT_ENABLE,
1831 	    RCC_CPERCKSELR, 0, 2, 0),
1832 
1833 	MUX(CK_MPU, "ck_mpu", cpu_src, CLK_OPS_PARENT_ENABLE |
1834 	     CLK_IS_CRITICAL, RCC_MPCKSELR, 0, 2, 0),
1835 
1836 	COMPOSITE(CK_AXI, "ck_axi", axi_src, CLK_IS_CRITICAL |
1837 		   CLK_OPS_PARENT_ENABLE,
1838 		   _NO_GATE,
1839 		   _MUX(RCC_ASSCKSELR, 0, 2, 0),
1840 		   _DIV(RCC_AXIDIVR, 0, 3, 0, axi_div_table)),
1841 
1842 	COMPOSITE(CK_MCU, "ck_mcu", mcu_src, CLK_IS_CRITICAL |
1843 		   CLK_OPS_PARENT_ENABLE,
1844 		   _NO_GATE,
1845 		   _MUX(RCC_MSSCKSELR, 0, 2, 0),
1846 		   _DIV(RCC_MCUDIVR, 0, 4, 0, mcu_div_table)),
1847 
1848 	DIV_TABLE(NO_ID, "pclk1", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB1DIVR, 0,
1849 		  3, CLK_DIVIDER_READ_ONLY, apb_div_table),
1850 
1851 	DIV_TABLE(NO_ID, "pclk2", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB2DIVR, 0,
1852 		  3, CLK_DIVIDER_READ_ONLY, apb_div_table),
1853 
1854 	DIV_TABLE(NO_ID, "pclk3", "ck_mcu", CLK_IGNORE_UNUSED, RCC_APB3DIVR, 0,
1855 		  3, CLK_DIVIDER_READ_ONLY, apb_div_table),
1856 
1857 	DIV_TABLE(NO_ID, "pclk4", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB4DIVR, 0,
1858 		  3, CLK_DIVIDER_READ_ONLY, apb_div_table),
1859 
1860 	DIV_TABLE(NO_ID, "pclk5", "ck_axi", CLK_IGNORE_UNUSED, RCC_APB5DIVR, 0,
1861 		  3, CLK_DIVIDER_READ_ONLY, apb_div_table),
1862 
1863 	/* Kernel Timers */
1864 	STM32_CKTIM("ck1_tim", "pclk1", 0, RCC_APB1DIVR, RCC_TIMG1PRER),
1865 	STM32_CKTIM("ck2_tim", "pclk2", 0, RCC_APB2DIVR, RCC_TIMG2PRER),
1866 
1867 	STM32_TIM(TIM2_K, "tim2_k", "ck1_tim", RCC_APB1ENSETR, 0),
1868 	STM32_TIM(TIM3_K, "tim3_k", "ck1_tim", RCC_APB1ENSETR, 1),
1869 	STM32_TIM(TIM4_K, "tim4_k", "ck1_tim", RCC_APB1ENSETR, 2),
1870 	STM32_TIM(TIM5_K, "tim5_k", "ck1_tim", RCC_APB1ENSETR, 3),
1871 	STM32_TIM(TIM6_K, "tim6_k", "ck1_tim", RCC_APB1ENSETR, 4),
1872 	STM32_TIM(TIM7_K, "tim7_k", "ck1_tim", RCC_APB1ENSETR, 5),
1873 	STM32_TIM(TIM12_K, "tim12_k", "ck1_tim", RCC_APB1ENSETR, 6),
1874 	STM32_TIM(TIM13_K, "tim13_k", "ck1_tim", RCC_APB1ENSETR, 7),
1875 	STM32_TIM(TIM14_K, "tim14_k", "ck1_tim", RCC_APB1ENSETR, 8),
1876 	STM32_TIM(TIM1_K, "tim1_k", "ck2_tim", RCC_APB2ENSETR, 0),
1877 	STM32_TIM(TIM8_K, "tim8_k", "ck2_tim", RCC_APB2ENSETR, 1),
1878 	STM32_TIM(TIM15_K, "tim15_k", "ck2_tim", RCC_APB2ENSETR, 2),
1879 	STM32_TIM(TIM16_K, "tim16_k", "ck2_tim", RCC_APB2ENSETR, 3),
1880 	STM32_TIM(TIM17_K, "tim17_k", "ck2_tim", RCC_APB2ENSETR, 4),
1881 
1882 	/* Peripheral clocks */
1883 	PCLK(TIM2, "tim2", "pclk1", CLK_IGNORE_UNUSED, G_TIM2),
1884 	PCLK(TIM3, "tim3", "pclk1", CLK_IGNORE_UNUSED, G_TIM3),
1885 	PCLK(TIM4, "tim4", "pclk1", CLK_IGNORE_UNUSED, G_TIM4),
1886 	PCLK(TIM5, "tim5", "pclk1", CLK_IGNORE_UNUSED, G_TIM5),
1887 	PCLK(TIM6, "tim6", "pclk1", CLK_IGNORE_UNUSED, G_TIM6),
1888 	PCLK(TIM7, "tim7", "pclk1", CLK_IGNORE_UNUSED, G_TIM7),
1889 	PCLK(TIM12, "tim12", "pclk1", CLK_IGNORE_UNUSED, G_TIM12),
1890 	PCLK(TIM13, "tim13", "pclk1", CLK_IGNORE_UNUSED, G_TIM13),
1891 	PCLK(TIM14, "tim14", "pclk1", CLK_IGNORE_UNUSED, G_TIM14),
1892 	PCLK(LPTIM1, "lptim1", "pclk1", 0, G_LPTIM1),
1893 	PCLK(SPI2, "spi2", "pclk1", 0, G_SPI2),
1894 	PCLK(SPI3, "spi3", "pclk1", 0, G_SPI3),
1895 	PCLK(USART2, "usart2", "pclk1", 0, G_USART2),
1896 	PCLK(USART3, "usart3", "pclk1", 0, G_USART3),
1897 	PCLK(UART4, "uart4", "pclk1", 0, G_UART4),
1898 	PCLK(UART5, "uart5", "pclk1", 0, G_UART5),
1899 	PCLK(UART7, "uart7", "pclk1", 0, G_UART7),
1900 	PCLK(UART8, "uart8", "pclk1", 0, G_UART8),
1901 	PCLK(I2C1, "i2c1", "pclk1", 0, G_I2C1),
1902 	PCLK(I2C2, "i2c2", "pclk1", 0, G_I2C2),
1903 	PCLK(I2C3, "i2c3", "pclk1", 0, G_I2C3),
1904 	PCLK(I2C5, "i2c5", "pclk1", 0, G_I2C5),
1905 	PCLK(SPDIF, "spdif", "pclk1", 0, G_SPDIF),
1906 	PCLK(CEC, "cec", "pclk1", 0, G_CEC),
1907 	PCLK(DAC12, "dac12", "pclk1", 0, G_DAC12),
1908 	PCLK(MDIO, "mdio", "pclk1", 0, G_MDIO),
1909 	PCLK(TIM1, "tim1", "pclk2", CLK_IGNORE_UNUSED, G_TIM1),
1910 	PCLK(TIM8, "tim8", "pclk2", CLK_IGNORE_UNUSED, G_TIM8),
1911 	PCLK(TIM15, "tim15", "pclk2", CLK_IGNORE_UNUSED, G_TIM15),
1912 	PCLK(TIM16, "tim16", "pclk2", CLK_IGNORE_UNUSED, G_TIM16),
1913 	PCLK(TIM17, "tim17", "pclk2", CLK_IGNORE_UNUSED, G_TIM17),
1914 	PCLK(SPI1, "spi1", "pclk2", 0, G_SPI1),
1915 	PCLK(SPI4, "spi4", "pclk2", 0, G_SPI4),
1916 	PCLK(SPI5, "spi5", "pclk2", 0, G_SPI5),
1917 	PCLK(USART6, "usart6", "pclk2", 0, G_USART6),
1918 	PCLK(SAI1, "sai1", "pclk2", 0, G_SAI1),
1919 	PCLK(SAI2, "sai2", "pclk2", 0, G_SAI2),
1920 	PCLK(SAI3, "sai3", "pclk2", 0, G_SAI3),
1921 	PCLK(DFSDM, "dfsdm", "pclk2", 0, G_DFSDM),
1922 	PCLK(FDCAN, "fdcan", "pclk2", 0, G_FDCAN),
1923 	PCLK(LPTIM2, "lptim2", "pclk3", 0, G_LPTIM2),
1924 	PCLK(LPTIM3, "lptim3", "pclk3", 0, G_LPTIM3),
1925 	PCLK(LPTIM4, "lptim4", "pclk3", 0, G_LPTIM4),
1926 	PCLK(LPTIM5, "lptim5", "pclk3", 0, G_LPTIM5),
1927 	PCLK(SAI4, "sai4", "pclk3", 0, G_SAI4),
1928 	PCLK(SYSCFG, "syscfg", "pclk3", 0, G_SYSCFG),
1929 	PCLK(VREF, "vref", "pclk3", 13, G_VREF),
1930 	PCLK(TMPSENS, "tmpsens", "pclk3", 0, G_TMPSENS),
1931 	PCLK(PMBCTRL, "pmbctrl", "pclk3", 0, G_PMBCTRL),
1932 	PCLK(HDP, "hdp", "pclk3", 0, G_HDP),
1933 	PCLK(LTDC, "ltdc", "pclk4", 0, G_LTDC),
1934 	PCLK(DSI, "dsi", "pclk4", 0, G_DSI),
1935 	PCLK(IWDG2, "iwdg2", "pclk4", 0, G_IWDG2),
1936 	PCLK(USBPHY, "usbphy", "pclk4", 0, G_USBPHY),
1937 	PCLK(STGENRO, "stgenro", "pclk4", 0, G_STGENRO),
1938 	PCLK(SPI6, "spi6", "pclk5", 0, G_SPI6),
1939 	PCLK(I2C4, "i2c4", "pclk5", 0, G_I2C4),
1940 	PCLK(I2C6, "i2c6", "pclk5", 0, G_I2C6),
1941 	PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
1942 	PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
1943 	     CLK_IS_CRITICAL, G_RTCAPB),
1944 	PCLK(TZC1, "tzc1", "ck_axi", CLK_IGNORE_UNUSED, G_TZC1),
1945 	PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2),
1946 	PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
1947 	PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
1948 	PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
1949 	PCLK(STGEN, "stgen", "pclk5", CLK_IGNORE_UNUSED, G_STGEN),
1950 	PCLK(DMA1, "dma1", "ck_mcu", 0, G_DMA1),
1951 	PCLK(DMA2, "dma2", "ck_mcu", 0, G_DMA2),
1952 	PCLK(DMAMUX, "dmamux", "ck_mcu", 0, G_DMAMUX),
1953 	PCLK(ADC12, "adc12", "ck_mcu", 0, G_ADC12),
1954 	PCLK(USBO, "usbo", "ck_mcu", 0, G_USBO),
1955 	PCLK(SDMMC3, "sdmmc3", "ck_mcu", 0, G_SDMMC3),
1956 	PCLK(DCMI, "dcmi", "ck_mcu", 0, G_DCMI),
1957 	PCLK(CRYP2, "cryp2", "ck_mcu", 0, G_CRYP2),
1958 	PCLK(HASH2, "hash2", "ck_mcu", 0, G_HASH2),
1959 	PCLK(RNG2, "rng2", "ck_mcu", 0, G_RNG2),
1960 	PCLK(CRC2, "crc2", "ck_mcu", 0, G_CRC2),
1961 	PCLK(HSEM, "hsem", "ck_mcu", 0, G_HSEM),
1962 	PCLK(IPCC, "ipcc", "ck_mcu", 0, G_IPCC),
1963 	PCLK(GPIOA, "gpioa", "ck_mcu", 0, G_GPIOA),
1964 	PCLK(GPIOB, "gpiob", "ck_mcu", 0, G_GPIOB),
1965 	PCLK(GPIOC, "gpioc", "ck_mcu", 0, G_GPIOC),
1966 	PCLK(GPIOD, "gpiod", "ck_mcu", 0, G_GPIOD),
1967 	PCLK(GPIOE, "gpioe", "ck_mcu", 0, G_GPIOE),
1968 	PCLK(GPIOF, "gpiof", "ck_mcu", 0, G_GPIOF),
1969 	PCLK(GPIOG, "gpiog", "ck_mcu", 0, G_GPIOG),
1970 	PCLK(GPIOH, "gpioh", "ck_mcu", 0, G_GPIOH),
1971 	PCLK(GPIOI, "gpioi", "ck_mcu", 0, G_GPIOI),
1972 	PCLK(GPIOJ, "gpioj", "ck_mcu", 0, G_GPIOJ),
1973 	PCLK(GPIOK, "gpiok", "ck_mcu", 0, G_GPIOK),
1974 	PCLK(GPIOZ, "gpioz", "ck_axi", CLK_IGNORE_UNUSED, G_GPIOZ),
1975 	PCLK(CRYP1, "cryp1", "ck_axi", CLK_IGNORE_UNUSED, G_CRYP1),
1976 	PCLK(HASH1, "hash1", "ck_axi", CLK_IGNORE_UNUSED, G_HASH1),
1977 	PCLK(RNG1, "rng1", "ck_axi", 0, G_RNG1),
1978 	PCLK(BKPSRAM, "bkpsram", "ck_axi", CLK_IGNORE_UNUSED, G_BKPSRAM),
1979 	PCLK(MDMA, "mdma", "ck_axi", 0, G_MDMA),
1980 	PCLK(GPU, "gpu", "ck_axi", 0, G_GPU),
1981 	PCLK(ETHTX, "ethtx", "ck_axi", 0, G_ETHTX),
1982 	PCLK_PDATA(ETHRX, "ethrx", ethrx_src, 0, G_ETHRX),
1983 	PCLK(ETHMAC, "ethmac", "ck_axi", 0, G_ETHMAC),
1984 	PCLK(FMC, "fmc", "ck_axi", CLK_IGNORE_UNUSED, G_FMC),
1985 	PCLK(QSPI, "qspi", "ck_axi", CLK_IGNORE_UNUSED, G_QSPI),
1986 	PCLK(SDMMC1, "sdmmc1", "ck_axi", 0, G_SDMMC1),
1987 	PCLK(SDMMC2, "sdmmc2", "ck_axi", 0, G_SDMMC2),
1988 	PCLK(CRC1, "crc1", "ck_axi", 0, G_CRC1),
1989 	PCLK(USBH, "usbh", "ck_axi", 0, G_USBH),
1990 	PCLK(ETHSTP, "ethstp", "ck_axi", 0, G_ETHSTP),
1991 	PCLK(DDRPERFM, "ddrperfm", "pclk4", 0, G_DDRPERFM),
1992 
1993 	/* Kernel clocks */
1994 	KCLK(SDMMC1_K, "sdmmc1_k", sdmmc12_src, 0, G_SDMMC1, M_SDMMC12),
1995 	KCLK(SDMMC2_K, "sdmmc2_k", sdmmc12_src, 0, G_SDMMC2, M_SDMMC12),
1996 	KCLK(SDMMC3_K, "sdmmc3_k", sdmmc3_src, 0, G_SDMMC3, M_SDMMC3),
1997 	KCLK(FMC_K, "fmc_k", fmc_src, 0, G_FMC, M_FMC),
1998 	KCLK(QSPI_K, "qspi_k", qspi_src, 0, G_QSPI, M_QSPI),
1999 	KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
2000 	KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
2001 	KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
2002 	KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
2003 	KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
2004 	KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
2005 	KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
2006 	KCLK(SPI3_K, "spi3_k", spi123_src, 0, G_SPI3, M_SPI23),
2007 	KCLK(SPI4_K, "spi4_k", spi45_src, 0, G_SPI4, M_SPI45),
2008 	KCLK(SPI5_K, "spi5_k", spi45_src, 0, G_SPI5, M_SPI45),
2009 	KCLK(SPI6_K, "spi6_k", spi6_src, 0, G_SPI6, M_SPI6),
2010 	KCLK(CEC_K, "cec_k", cec_src, 0, G_CEC, M_CEC),
2011 	KCLK(I2C1_K, "i2c1_k", i2c12_src, 0, G_I2C1, M_I2C12),
2012 	KCLK(I2C2_K, "i2c2_k", i2c12_src, 0, G_I2C2, M_I2C12),
2013 	KCLK(I2C3_K, "i2c3_k", i2c35_src, 0, G_I2C3, M_I2C35),
2014 	KCLK(I2C5_K, "i2c5_k", i2c35_src, 0, G_I2C5, M_I2C35),
2015 	KCLK(I2C4_K, "i2c4_k", i2c46_src, 0, G_I2C4, M_I2C46),
2016 	KCLK(I2C6_K, "i2c6_k", i2c46_src, 0, G_I2C6, M_I2C46),
2017 	KCLK(LPTIM1_K, "lptim1_k", lptim1_src, 0, G_LPTIM1, M_LPTIM1),
2018 	KCLK(LPTIM2_K, "lptim2_k", lptim23_src, 0, G_LPTIM2, M_LPTIM23),
2019 	KCLK(LPTIM3_K, "lptim3_k", lptim23_src, 0, G_LPTIM3, M_LPTIM23),
2020 	KCLK(LPTIM4_K, "lptim4_k", lptim45_src, 0, G_LPTIM4, M_LPTIM45),
2021 	KCLK(LPTIM5_K, "lptim5_k", lptim45_src, 0, G_LPTIM5, M_LPTIM45),
2022 	KCLK(USART1_K, "usart1_k", usart1_src, 0, G_USART1, M_USART1),
2023 	KCLK(USART2_K, "usart2_k", usart234578_src, 0, G_USART2, M_UART24),
2024 	KCLK(USART3_K, "usart3_k", usart234578_src, 0, G_USART3, M_UART35),
2025 	KCLK(UART4_K, "uart4_k", usart234578_src, 0, G_UART4, M_UART24),
2026 	KCLK(UART5_K, "uart5_k", usart234578_src, 0, G_UART5, M_UART35),
2027 	KCLK(USART6_K, "uart6_k", usart6_src, 0, G_USART6, M_USART6),
2028 	KCLK(UART7_K, "uart7_k", usart234578_src, 0, G_UART7, M_UART78),
2029 	KCLK(UART8_K, "uart8_k", usart234578_src, 0, G_UART8, M_UART78),
2030 	KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
2031 	KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
2032 	KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
2033 	KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
2034 	KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
2035 	KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
2036 	KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
2037 	KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
2038 	KCLK(USBO_K, "usbo_k", usbo_src, 0, G_USBO, M_USBO),
2039 
2040 	/* Particular kernel clocks (no mux or no gate) */
2041 	MGATE_MP1(DFSDM_K, "dfsdm_k", "ck_mcu", 0, G_DFSDM),
2042 	MGATE_MP1(DSI_PX, "dsi_px", "pll4_q", CLK_SET_RATE_PARENT, G_DSI),
2043 	MGATE_MP1(LTDC_PX, "ltdc_px", "pll4_q", CLK_SET_RATE_PARENT, G_LTDC),
2044 	MGATE_MP1(GPU_K, "gpu_k", "pll2_q", 0, G_GPU),
2045 	MGATE_MP1(DAC12_K, "dac12_k", "ck_lsi", 0, G_DAC12),
2046 
2047 	COMPOSITE(NO_ID, "ck_ker_eth", eth_src, CLK_OPS_PARENT_ENABLE |
2048 		  CLK_SET_RATE_NO_REPARENT,
2049 		  _NO_GATE,
2050 		  _MMUX(M_ETHCK),
2051 		  _NO_DIV),
2052 
2053 	MGATE_MP1(ETHCK_K, "ethck_k", "ck_ker_eth", 0, G_ETHCK),
2054 
2055 	DIV(ETHPTP_K, "ethptp_k", "ck_ker_eth", CLK_OPS_PARENT_ENABLE |
2056 	    CLK_SET_RATE_NO_REPARENT, RCC_ETHCKSELR, 4, 4, 0),
2057 
2058 	/* RTC clock */
2059 	COMPOSITE(RTC, "ck_rtc", rtc_src, CLK_OPS_PARENT_ENABLE,
2060 		  _GATE(RCC_BDCR, 20, 0),
2061 		  _MUX(RCC_BDCR, 16, 2, 0),
2062 		  _DIV_RTC(RCC_RTCDIVR, 0, 6, 0, NULL)),
2063 
2064 	/* MCO clocks */
2065 	COMPOSITE(CK_MCO1, "ck_mco1", mco1_src, CLK_OPS_PARENT_ENABLE |
2066 		  CLK_SET_RATE_NO_REPARENT,
2067 		  _GATE(RCC_MCO1CFGR, 12, 0),
2068 		  _MUX(RCC_MCO1CFGR, 0, 3, 0),
2069 		  _DIV(RCC_MCO1CFGR, 4, 4, 0, NULL)),
2070 
2071 	COMPOSITE(CK_MCO2, "ck_mco2", mco2_src, CLK_OPS_PARENT_ENABLE |
2072 		  CLK_SET_RATE_NO_REPARENT,
2073 		  _GATE(RCC_MCO2CFGR, 12, 0),
2074 		  _MUX(RCC_MCO2CFGR, 0, 3, 0),
2075 		  _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
2076 
2077 	/* Debug clocks */
2078 	GATE(CK_DBG, "ck_sys_dbg", "ck_axi", CLK_IGNORE_UNUSED,
2079 	     RCC_DBGCFGR, 8, 0),
2080 
2081 	COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,
2082 		  _GATE(RCC_DBGCFGR, 9, 0),
2083 		  _NO_MUX,
2084 		  _DIV(RCC_DBGCFGR, 0, 3, 0, ck_trace_div_table)),
2085 };
2086 
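/*
 * Clocks expected to be under secure firmware control: they are skipped at
 * registration time when the RCC is probed through the
 * "st,stm32mp1-rcc-secure" compatible (see stm32_check_security()).
 */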
2087 static const u32 stm32mp1_clock_secured[] = {
2088 	CK_HSE,
2089 	CK_HSI,
2090 	CK_CSI,
2091 	CK_LSI,
2092 	CK_LSE,
2093 	PLL1,
2094 	PLL2,
2095 	PLL1_P,
2096 	PLL2_P,
2097 	PLL2_Q,
2098 	PLL2_R,
2099 	CK_MPU,
2100 	CK_AXI,
2101 	SPI6,
2102 	I2C4,
2103 	I2C6,
2104 	USART1,
2105 	RTCAPB,
2106 	TZC1,
2107 	TZC2,
2108 	TZPC,
2109 	IWDG1,
2110 	BSEC,
2111 	STGEN,
2112 	GPIOZ,
2113 	CRYP1,
2114 	HASH1,
2115 	RNG1,
2116 	BKPSRAM,
2117 	RNG1_K,
2118 	STGEN_K,
2119 	SPI6_K,
2120 	I2C4_K,
2121 	I2C6_K,
2122 	USART1_K,
2123 	RTC,
2124 };
2125 
2126 static bool stm32_check_security(const struct clock_config *cfg)
2127 {
2128 	int i;
2129 
2130 	for (i = 0; i < ARRAY_SIZE(stm32mp1_clock_secured); i++)
2131 		if (cfg->id == stm32mp1_clock_secured[i])
2132 			return true;
2133 	return false;
2134 }
2135 
2136 struct stm32_rcc_match_data {
2137 	const struct clock_config *cfg;
2138 	unsigned int num;
2139 	unsigned int maxbinding;
2140 	u32 clear_offset;
2141 	bool (*check_security)(const struct clock_config *cfg);
2142 };
2143 
2144 static struct stm32_rcc_match_data stm32mp1_data = {
2145 	.cfg		= stm32mp1_clock_cfg,
2146 	.num		= ARRAY_SIZE(stm32mp1_clock_cfg),
2147 	.maxbinding	= STM32MP1_LAST_CLK,
2148 	.clear_offset	= RCC_CLR,
2149 };
2150 
2151 static struct stm32_rcc_match_data stm32mp1_data_secure = {
2152 	.cfg		= stm32mp1_clock_cfg,
2153 	.num		= ARRAY_SIZE(stm32mp1_clock_cfg),
2154 	.maxbinding	= STM32MP1_LAST_CLK,
2155 	.clear_offset	= RCC_CLR,
2156 	.check_security = &stm32_check_security
2157 };
2158 
2159 static const struct of_device_id stm32mp1_match_data[] = {
2160 	{
2161 		.compatible = "st,stm32mp1-rcc",
2162 		.data = &stm32mp1_data,
2163 	},
2164 	{
2165 		.compatible = "st,stm32mp1-rcc-secure",
2166 		.data = &stm32mp1_data_secure,
2167 	},
2168 	{ }
2169 };
2170 MODULE_DEVICE_TABLE(of, stm32mp1_match_data);
2171 
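/*
 * Instantiate one clock from its configuration entry and, when the entry has
 * a DT binding index (cfg->id != NO_ID), store the resulting clk_hw in the
 * provider array.
 */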
2172 static int stm32_register_hw_clk(struct device *dev,
2173 				 struct clk_hw_onecell_data *clk_data,
2174 				 void __iomem *base, spinlock_t *lock,
2175 				 const struct clock_config *cfg)
2176 {
2177 	struct clk_hw **hws;
2178 	struct clk_hw *hw = ERR_PTR(-ENOENT);
2179 
2180 	hws = clk_data->hws;
2181 
2182 	if (cfg->func)
2183 		hw = (*cfg->func)(dev, clk_data, base, lock, cfg);
2184 
2185 	if (IS_ERR(hw)) {
2186 		pr_err("Unable to register %s\n", cfg->name);
2187 		return PTR_ERR(hw);
2188 	}
2189 
2190 	if (cfg->id != NO_ID)
2191 		hws[cfg->id] = hw;
2192 
2193 	return 0;
2194 }
2195 
2196 #define STM32_RESET_ID_MASK GENMASK(15, 0)
2197 
2198 struct stm32_reset_data {
2199 	/* reset lock */
2200 	spinlock_t			lock;
2201 	struct reset_controller_dev	rcdev;
2202 	void __iomem			*membase;
2203 	u32				clear_offset;
2204 };
2205 
2206 static inline struct stm32_reset_data *
2207 to_stm32_reset_data(struct reset_controller_dev *rcdev)
2208 {
2209 	return container_of(rcdev, struct stm32_reset_data, rcdev);
2210 }
2211 
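/*
 * Update one reset line. Banks with a dedicated clear register
 * (clear_offset != 0) are driven with a single write to the SET or CLR
 * register; otherwise the shared register is read-modify-written under
 * the lock.
 */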
2212 static int stm32_reset_update(struct reset_controller_dev *rcdev,
2213 			      unsigned long id, bool assert)
2214 {
2215 	struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
2216 	int reg_width = sizeof(u32);
2217 	int bank = id / (reg_width * BITS_PER_BYTE);
2218 	int offset = id % (reg_width * BITS_PER_BYTE);
2219 
2220 	if (data->clear_offset) {
2221 		void __iomem *addr;
2222 
2223 		addr = data->membase + (bank * reg_width);
2224 		if (!assert)
2225 			addr += data->clear_offset;
2226 
2227 		writel(BIT(offset), addr);
2228 
2229 	} else {
2230 		unsigned long flags;
2231 		u32 reg;
2232 
2233 		spin_lock_irqsave(&data->lock, flags);
2234 
2235 		reg = readl(data->membase + (bank * reg_width));
2236 
2237 		if (assert)
2238 			reg |= BIT(offset);
2239 		else
2240 			reg &= ~BIT(offset);
2241 
2242 		writel(reg, data->membase + (bank * reg_width));
2243 
2244 		spin_unlock_irqrestore(&data->lock, flags);
2245 	}
2246 
2247 	return 0;
2248 }
2249 
2250 static int stm32_reset_assert(struct reset_controller_dev *rcdev,
2251 			      unsigned long id)
2252 {
2253 	return stm32_reset_update(rcdev, id, true);
2254 }
2255 
2256 static int stm32_reset_deassert(struct reset_controller_dev *rcdev,
2257 				unsigned long id)
2258 {
2259 	return stm32_reset_update(rcdev, id, false);
2260 }
2261 
2262 static int stm32_reset_status(struct reset_controller_dev *rcdev,
2263 			      unsigned long id)
2264 {
2265 	struct stm32_reset_data *data = to_stm32_reset_data(rcdev);
2266 	int reg_width = sizeof(u32);
2267 	int bank = id / (reg_width * BITS_PER_BYTE);
2268 	int offset = id % (reg_width * BITS_PER_BYTE);
2269 	u32 reg;
2270 
2271 	reg = readl(data->membase + (bank * reg_width));
2272 
2273 	return !!(reg & BIT(offset));
2274 }
2275 
2276 static const struct reset_control_ops stm32_reset_ops = {
2277 	.assert		= stm32_reset_assert,
2278 	.deassert	= stm32_reset_deassert,
2279 	.status		= stm32_reset_status,
2280 };
2281 
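/* Register the RCC reset controller on top of the shared register space. */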
2282 static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
2283 				const struct of_device_id *match)
2284 {
2285 	const struct stm32_rcc_match_data *data = match->data;
2286 	struct stm32_reset_data *reset_data = NULL;
2287 
2288 	reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
2289 	if (!reset_data)
2290 		return -ENOMEM;
2291 
2292 	spin_lock_init(&reset_data->lock);
2293 	reset_data->membase = base;
2294 	reset_data->rcdev.owner = THIS_MODULE;
2295 	reset_data->rcdev.ops = &stm32_reset_ops;
2296 	reset_data->rcdev.of_node = dev_of_node(dev);
2297 	reset_data->rcdev.nr_resets = STM32_RESET_ID_MASK;
2298 	reset_data->clear_offset = data->clear_offset;
2299 
2300 	return reset_controller_register(&reset_data->rcdev);
2301 }
2302 
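/*
 * Register every clock described in the match data, skipping the entries
 * flagged as secure, and expose them through a one-cell clk_hw provider.
 */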
2303 static int stm32_rcc_clock_init(struct device *dev, void __iomem *base,
2304 				const struct of_device_id *match)
2305 {
2306 	const struct stm32_rcc_match_data *data = match->data;
2307 	struct clk_hw_onecell_data *clk_data;
2308 	struct clk_hw **hws;
2309 	int err, n, max_binding;
2310 
2311 	max_binding = data->maxbinding;
2312 
2313 	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, max_binding),
2314 				GFP_KERNEL);
2315 	if (!clk_data)
2316 		return -ENOMEM;
2317 
2318 	clk_data->num = max_binding;
2319 
2320 	hws = clk_data->hws;
2321 
2322 	for (n = 0; n < max_binding; n++)
2323 		hws[n] = ERR_PTR(-ENOENT);
2324 
2325 	for (n = 0; n < data->num; n++) {
2326 		if (data->check_security && data->check_security(&data->cfg[n]))
2327 			continue;
2328 
2329 		err = stm32_register_hw_clk(dev, clk_data, base, &rlock,
2330 					    &data->cfg[n]);
2331 		if (err) {
2332 			dev_err(dev, "Can't register clk %s: %d\n",
2333 				data->cfg[n].name, err);
2334 
2335 			return err;
2336 		}
2337 	}
2338 
2339 	return of_clk_add_hw_provider(dev_of_node(dev), of_clk_hw_onecell_get, clk_data);
2340 }
2341 
2342 static int stm32_rcc_init(struct device *dev, void __iomem *base,
2343 			  const struct of_device_id *match_data)
2344 {
2345 	const struct of_device_id *match;
2346 	int err;
2347 
2348 	match = of_match_node(match_data, dev_of_node(dev));
2349 	if (!match) {
2350 		dev_err(dev, "match data not found\n");
2351 		return -ENODEV;
2352 	}
2353 
2354 	/* RCC Reset Configuration */
2355 	err = stm32_rcc_reset_init(dev, base, match);
2356 	if (err) {
2357 		pr_err("stm32mp1 reset failed to initialize\n");
2358 		return err;
2359 	}
2360 
2361 	/* RCC Clock Configuration */
2362 	err = stm32_rcc_clock_init(dev, base, match);
2363 	if (err) {
2364 		pr_err("stm32mp1 clock failed to initialize\n");
2365 		return err;
2366 	}
2367 
2368 	return 0;
2369 }
2370 
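/*
 * Map the RCC register space from the device node, then initialize the
 * reset controller and the clock provider.
 */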
2371 static int stm32mp1_rcc_init(struct device *dev)
2372 {
2373 	void __iomem *base;
2374 	int ret;
2375 
2376 	base = of_iomap(dev_of_node(dev), 0);
2377 	if (!base) {
2378 		pr_err("%pOFn: unable to map resource\n", dev_of_node(dev));
2379 		ret = -ENOMEM;
2380 		goto out;
2381 	}
2382 
2383 	ret = stm32_rcc_init(dev, base, stm32mp1_match_data);
2384 
2385 out:
2386 	if (ret) {
2387 		if (base)
2388 			iounmap(base);
2389 
2390 		of_node_put(dev_of_node(dev));
2391 	}
2392 
2393 	return ret;
2394 }
2395 
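/*
 * Take a device-managed reference on each external oscillator clock
 * ("hsi", "hse", "csi", "lsi", "lse") described in the device tree;
 * missing oscillators (-EINVAL/-ENOENT) are not treated as errors.
 */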
2396 static int get_clock_deps(struct device *dev)
2397 {
2398 	static const char * const clock_deps_name[] = {
2399 		"hsi", "hse", "csi", "lsi", "lse",
2400 	};
2401 	size_t deps_size = sizeof(struct clk *) * ARRAY_SIZE(clock_deps_name);
2402 	struct clk **clk_deps;
2403 	int i;
2404 
2405 	clk_deps = devm_kzalloc(dev, deps_size, GFP_KERNEL);
2406 	if (!clk_deps)
2407 		return -ENOMEM;
2408 
2409 	for (i = 0; i < ARRAY_SIZE(clock_deps_name); i++) {
2410 		struct clk *clk = of_clk_get_by_name(dev_of_node(dev),
2411 						     clock_deps_name[i]);
2412 
2413 		if (IS_ERR(clk)) {
2414 			if (PTR_ERR(clk) != -EINVAL && PTR_ERR(clk) != -ENOENT)
2415 				return PTR_ERR(clk);
2416 		} else {
2417 			/* Device gets a reference count on the clock */
2418 			clk_deps[i] = devm_clk_get(dev, __clk_get_name(clk));
2419 			clk_put(clk);
2420 		}
2421 	}
2422 
2423 	return 0;
2424 }
2425 
2426 static int stm32mp1_rcc_clocks_probe(struct platform_device *pdev)
2427 {
2428 	struct device *dev = &pdev->dev;
2429 	int ret = get_clock_deps(dev);
2430 
2431 	if (!ret)
2432 		ret = stm32mp1_rcc_init(dev);
2433 
2434 	return ret;
2435 }
2436 
2437 static void stm32mp1_rcc_clocks_remove(struct platform_device *pdev)
2438 {
2439 	struct device *dev = &pdev->dev;
2440 	struct device_node *child, *np = dev_of_node(dev);
2441 
2442 	for_each_available_child_of_node(np, child)
2443 		of_clk_del_provider(child);
2444 }
2445 
2446 static struct platform_driver stm32mp1_rcc_clocks_driver = {
2447 	.driver	= {
2448 		.name = "stm32mp1_rcc",
2449 		.of_match_table = stm32mp1_match_data,
2450 	},
2451 	.probe = stm32mp1_rcc_clocks_probe,
2452 	.remove_new = stm32mp1_rcc_clocks_remove,
2453 };
2454 
2455 static int __init stm32mp1_clocks_init(void)
2456 {
2457 	return platform_driver_register(&stm32mp1_rcc_clocks_driver);
2458 }
2459 core_initcall(stm32mp1_clocks_init);
2460