1 /*
2  * Copyright (C) Gabriel Fernandez 2017
3  * Author: Gabriel Fernandez <gabriel.fernandez@st.com>
4  *
5  * License terms: GPL V2.0.
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program.  If not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include <linux/clk.h>
21 #include <linux/clk-provider.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/mfd/syscon.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/regmap.h>
30 
31 #include <dt-bindings/clock/stm32h7-clks.h>
32 
33 /* Reset and Clock Control (RCC) registers */
34 #define RCC_CR		0x00
35 #define RCC_CFGR	0x10
36 #define RCC_D1CFGR	0x18
37 #define RCC_D2CFGR	0x1C
38 #define RCC_D3CFGR	0x20
39 #define RCC_PLLCKSELR	0x28
40 #define RCC_PLLCFGR	0x2C
41 #define RCC_PLL1DIVR	0x30
42 #define RCC_PLL1FRACR	0x34
43 #define RCC_PLL2DIVR	0x38
44 #define RCC_PLL2FRACR	0x3C
45 #define RCC_PLL3DIVR	0x40
46 #define RCC_PLL3FRACR	0x44
47 #define RCC_D1CCIPR	0x4C
48 #define RCC_D2CCIP1R	0x50
49 #define RCC_D2CCIP2R	0x54
50 #define RCC_D3CCIPR	0x58
51 #define RCC_BDCR	0x70
52 #define RCC_CSR		0x74
53 #define RCC_AHB3ENR	0xD4
54 #define RCC_AHB1ENR	0xD8
55 #define RCC_AHB2ENR	0xDC
56 #define RCC_AHB4ENR	0xE0
57 #define RCC_APB3ENR	0xE4
58 #define RCC_APB1LENR	0xE8
59 #define RCC_APB1HENR	0xEC
60 #define RCC_APB2ENR	0xF0
61 #define RCC_APB4ENR	0xF4
62 
63 static DEFINE_SPINLOCK(stm32rcc_lock);
64 
65 static void __iomem *base;
66 static struct clk_hw **hws;
67 
68 /* System clock parent */
69 static const char * const sys_src[] = {
70 	"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };
71 
72 static const char * const tracein_src[] = {
73 	"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };
74 
75 static const char * const per_src[] = {
76 	"hsi_ker", "csi_ker", "hse_ck", "disabled" };
77 
78 static const char * const pll_src[] = {
79 	"hsi_ck", "csi_ck", "hse_ck", "no clock" };
80 
81 static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };
82 
83 static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };
84 
85 static const char * const qspi_src[] = {
86 	"hclk", "pll1_q", "pll2_r", "per_ck" };
87 
88 static const char * const fmc_src[] = {
89 	"hclk", "pll1_q", "pll2_r", "per_ck" };
90 
91 /* Kernel clock parent */
92 static const char * const swp_src[] = {	"pclk1", "hsi_ker" };
93 
94 static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };
95 
96 static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };
97 
98 static const char * const spdifrx_src[] = {
99 	"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };
100 
101 static const char *spi_src1[5] = {
102 	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
103 
104 static const char * const spi_src2[] = {
105 	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
106 
107 static const char * const spi_src3[] = {
108 	"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };
109 
110 static const char * const lptim_src1[] = {
111 	"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
112 
113 static const char * const lptim_src2[] = {
114 	"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };
115 
116 static const char * const cec_src[] = {"lse_ck", "lsi_ck", "csi_ker_div122" };
117 
118 static const char * const usbotg_src[] = {"pll1_q", "pll3_q", "rc48_ck" };
119 
120 /* i2c 1,2,3 src */
121 static const char * const i2c_src1[] = {
122 	"pclk1", "pll3_r", "hsi_ker", "csi_ker" };
123 
124 static const char * const i2c_src2[] = {
125 	"pclk4", "pll3_r", "hsi_ker", "csi_ker" };
126 
127 static const char * const rng_src[] = {
128 	"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };
129 
130 /* usart 1,6 src */
131 static const char * const usart_src1[] = {
132 	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
133 
134 /* usart 2,3,4,5,7,8 src */
135 static const char * const usart_src2[] = {
136 	"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };
137 
138 static const char *sai_src[5] = {
139 	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };
140 
141 static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };
142 
143 /* lpuart1 src */
144 static const char * const lpuart1_src[] = {
145 	"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };
146 
147 static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };
148 
149 /* RTC clock parent */
150 static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };
151 
152 /* Micro-controller output clock parent */
153 static const char * const mco_src1[] = {
154 	"hsi_ck", "lse_ck", "hse_ck", "pll1_q",	"rc48_ck" };
155 
156 static const char * const mco_src2[] = {
157 	"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };
158 
159 /* LCD clock */
160 static const char * const ltdc_src[] = {"pll3_r"};
161 
162 /* Gate clock with ready bit and backup domain management */
163 struct stm32_ready_gate {
164 	struct	clk_gate gate;
165 	u8	bit_rdy;
166 };
167 
168 #define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
169 		gate)
170 
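/* Worst-case wait for a ready bit: RGATE_TIMEOUT polls of roughly 100 us
 * each, i.e. about one second.
 */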
171 #define RGATE_TIMEOUT 10000
172 
173 static int ready_gate_clk_enable(struct clk_hw *hw)
174 {
175 	struct clk_gate *gate = to_clk_gate(hw);
176 	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
177 	int bit_status;
178 	unsigned int timeout = RGATE_TIMEOUT;
179 
180 	if (clk_gate_ops.is_enabled(hw))
181 		return 0;
182 
183 	clk_gate_ops.enable(hw);
184 
185 	/* We can't use readl_poll_timeout() because we could get stuck if
186 	 * someone enables this clock before the clocksource is ready.
187 	 * Only the jiffies counter would be available, but jiffies are only
188 	 * incremented by interrupts and the enable op must not be interrupted.
189 	 */
190 	do {
191 		bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
192 
193 		if (bit_status)
194 			udelay(100);
195 
196 	} while (bit_status && --timeout);
197 
198 	return bit_status;
199 }
200 
201 static void ready_gate_clk_disable(struct clk_hw *hw)
202 {
203 	struct clk_gate *gate = to_clk_gate(hw);
204 	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
205 	int bit_status;
206 	unsigned int timeout = RGATE_TIMEOUT;
207 
208 	if (!clk_gate_ops.is_enabled(hw))
209 		return;
210 
211 	clk_gate_ops.disable(hw);
212 
213 	do {
214 		bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
215 
216 		if (bit_status)
217 			udelay(100);
218 
219 	} while (bit_status && --timeout);
220 }
221 
222 static const struct clk_ops ready_gate_clk_ops = {
223 	.enable		= ready_gate_clk_enable,
224 	.disable	= ready_gate_clk_disable,
225 	.is_enabled	= clk_gate_is_enabled,
226 };
227 
228 static struct clk_hw *clk_register_ready_gate(struct device *dev,
229 		const char *name, const char *parent_name,
230 		void __iomem *reg, u8 bit_idx, u8 bit_rdy,
231 		unsigned long flags, spinlock_t *lock)
232 {
233 	struct stm32_ready_gate *rgate;
234 	struct clk_init_data init = { NULL };
235 	struct clk_hw *hw;
236 	int ret;
237 
238 	rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
239 	if (!rgate)
240 		return ERR_PTR(-ENOMEM);
241 
242 	init.name = name;
243 	init.ops = &ready_gate_clk_ops;
244 	init.flags = flags;
245 	init.parent_names = &parent_name;
246 	init.num_parents = 1;
247 
248 	rgate->bit_rdy = bit_rdy;
249 	rgate->gate.lock = lock;
250 	rgate->gate.reg = reg;
251 	rgate->gate.bit_idx = bit_idx;
252 	rgate->gate.hw.init = &init;
253 
254 	hw = &rgate->gate.hw;
255 	ret = clk_hw_register(dev, hw);
256 	if (ret) {
257 		kfree(rgate);
258 		hw = ERR_PTR(ret);
259 	}
260 
261 	return hw;
262 }
263 
264 struct gate_cfg {
265 	u32 offset;
266 	u8  bit_idx;
267 };
268 
269 struct muxdiv_cfg {
270 	u32 offset;
271 	u8 shift;
272 	u8 width;
273 };
274 
275 struct composite_clk_cfg {
276 	struct gate_cfg *gate;
277 	struct muxdiv_cfg *mux;
278 	struct muxdiv_cfg *div;
279 	const char *name;
280 	const char * const *parent_name;
281 	int num_parents;
282 	u32 flags;
283 };
284 
285 struct composite_clk_gcfg_t {
286 	u8 flags;
287 	const struct clk_ops *ops;
288 };
289 
290 /*
291  * General configuration of a composite clock (only a clock divider for the rate)
292  */
293 struct composite_clk_gcfg {
294 	struct composite_clk_gcfg_t *mux;
295 	struct composite_clk_gcfg_t *div;
296 	struct composite_clk_gcfg_t *gate;
297 };
298 
299 #define M_CFG_MUX(_mux_ops, _mux_flags)\
300 	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops}
301 
302 #define M_CFG_DIV(_rate_ops, _rate_flags)\
303 	.div = &(struct composite_clk_gcfg_t) {_rate_flags, _rate_ops}
304 
305 #define M_CFG_GATE(_gate_ops, _gate_flags)\
306 	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops}
307 
308 static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
309 		u32 flags, spinlock_t *lock)
310 {
311 	struct clk_mux *mux;
312 
313 	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
314 	if (!mux)
315 		return ERR_PTR(-ENOMEM);
316 
317 	mux->reg	= reg;
318 	mux->shift	= shift;
319 	mux->mask	= (1 << width) - 1;
320 	mux->flags	= flags;
321 	mux->lock	= lock;
322 
323 	return mux;
324 }
325 
326 static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
327 		u32 flags, spinlock_t *lock)
328 {
329 	struct clk_divider *div;
330 
331 	div = kzalloc(sizeof(*div), GFP_KERNEL);
332 
333 	if (!div)
334 		return ERR_PTR(-ENOMEM);
335 
336 	div->reg   = reg;
337 	div->shift = shift;
338 	div->width = width;
339 	div->flags = flags;
340 	div->lock  = lock;
341 
342 	return div;
343 }
344 
345 static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
346 		spinlock_t *lock)
347 {
348 	struct clk_gate *gate;
349 
350 	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
351 	if (!gate)
352 		return ERR_PTR(-ENOMEM);
353 
354 	gate->reg	= reg;
355 	gate->bit_idx	= bit_idx;
356 	gate->flags	= flags;
357 	gate->lock	= lock;
358 
359 	return gate;
360 }
361 
362 struct composite_cfg {
363 	struct clk_hw *mux_hw;
364 	struct clk_hw *div_hw;
365 	struct clk_hw *gate_hw;
366 
367 	const struct clk_ops *mux_ops;
368 	const struct clk_ops *div_ops;
369 	const struct clk_ops *gate_ops;
370 };
371 
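/* Build the optional mux, divider and gate elements of a composite clock.
 * An element is only created when both the general configuration (gcfg)
 * and the per-clock configuration (cfg) describe it; the generic
 * clk_mux/clk_divider/clk_gate ops are used unless gcfg overrides them.
 */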
372 static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
373 		const struct composite_clk_cfg *cfg,
374 		struct composite_cfg *composite, spinlock_t *lock)
375 {
376 	struct clk_mux     *mux = NULL;
377 	struct clk_divider *div = NULL;
378 	struct clk_gate    *gate = NULL;
379 	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
380 	struct clk_hw *mux_hw;
381 	struct clk_hw *div_hw;
382 	struct clk_hw *gate_hw;
383 
384 	mux_ops = div_ops = gate_ops = NULL;
385 	mux_hw = div_hw = gate_hw = NULL;
386 
387 	if (gcfg->mux && cfg->mux) {
388 		mux = _get_cmux(base + cfg->mux->offset,
389 				cfg->mux->shift,
390 				cfg->mux->width,
391 				gcfg->mux->flags, lock);
392 
393 		if (!IS_ERR(mux)) {
394 			mux_hw = &mux->hw;
395 			mux_ops = gcfg->mux->ops ?
396 				  gcfg->mux->ops : &clk_mux_ops;
397 		}
398 	}
399 
400 	if (gcfg->div && cfg->div) {
401 		div = _get_cdiv(base + cfg->div->offset,
402 				cfg->div->shift,
403 				cfg->div->width,
404 				gcfg->div->flags, lock);
405 
406 		if (!IS_ERR(div)) {
407 			div_hw = &div->hw;
408 			div_ops = gcfg->div->ops ?
409 				  gcfg->div->ops : &clk_divider_ops;
410 		}
411 	}
412 
413 	if (gcfg->gate && cfg->gate) {
414 		gate = _get_cgate(base + cfg->gate->offset,
415 				cfg->gate->bit_idx,
416 				gcfg->gate->flags, lock);
417 
418 		if (!IS_ERR(gate)) {
419 			gate_hw = &gate->hw;
420 			gate_ops = gcfg->gate->ops ?
421 				   gcfg->gate->ops : &clk_gate_ops;
422 		}
423 	}
424 
425 	composite->mux_hw = mux_hw;
426 	composite->mux_ops = mux_ops;
427 
428 	composite->div_hw = div_hw;
429 	composite->div_ops = div_ops;
430 
431 	composite->gate_hw = gate_hw;
432 	composite->gate_ops = gate_ops;
433 }
434 
435 /* Kernel Timer */
436 struct timer_ker {
437 	u8 dppre_shift;
438 	struct clk_hw hw;
439 	spinlock_t *lock;
440 };
441 
442 #define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)
443 
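/* The timer kernel clock runs at 1x, 2x or 4x the APB clock: 1x when the
 * D2 domain APB prescaler (read at dppre_shift in RCC_D2CFGR) does not
 * divide, 4x when the TIMPRE bit (RCC_CFGR bit 15) is set and the APB
 * clock is divided by more than two, and 2x otherwise.
 */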
444 static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
445 		unsigned long parent_rate)
446 {
447 	struct timer_ker *clk_elem = to_timer_ker(hw);
448 	u32 timpre;
449 	u32 dppre_shift = clk_elem->dppre_shift;
450 	u32 prescaler;
451 	u32 mul;
452 
453 	timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;
454 
455 	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x07;
456 
457 	mul = 2;
458 
459 	if (prescaler < 4)
460 		mul = 1;
461 
462 	else if (timpre && prescaler > 4)
463 		mul = 4;
464 
465 	return parent_rate * mul;
466 }
467 
468 static const struct clk_ops timer_ker_ops = {
469 	.recalc_rate = timer_ker_recalc_rate,
470 };
471 
472 static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
473 		const char *name, const char *parent_name,
474 		unsigned long flags,
475 		u8 dppre_shift,
476 		spinlock_t *lock)
477 {
478 	struct timer_ker *element;
479 	struct clk_init_data init;
480 	struct clk_hw *hw;
481 	int err;
482 
483 	element = kzalloc(sizeof(*element), GFP_KERNEL);
484 	if (!element)
485 		return ERR_PTR(-ENOMEM);
486 
487 	init.name = name;
488 	init.ops = &timer_ker_ops;
489 	init.flags = flags;
490 	init.parent_names = &parent_name;
491 	init.num_parents = 1;
492 
493 	element->hw.init = &init;
494 	element->lock = lock;
495 	element->dppre_shift = dppre_shift;
496 
497 	hw = &element->hw;
498 	err = clk_hw_register(dev, hw);
499 
500 	if (err) {
501 		kfree(element);
502 		return ERR_PTR(err);
503 	}
504 
505 	return hw;
506 }
507 
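/* D1CPRE/HPRE prescaler encoding: values 0-7 leave the clock undivided,
 * values 8-15 select power-of-two dividers from 2 up to 512 (divide by 32
 * is skipped).
 */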
508 static const struct clk_div_table d1cpre_div_table[] = {
509 	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
510 	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1},
511 	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
512 	{ 12, 64 }, { 13, 128 }, { 14, 256 },
513 	{ 15, 512 },
514 	{ 0 },
515 };
516 
517 static const struct clk_div_table ppre_div_table[] = {
518 	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1},
519 	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
520 	{ 0 },
521 };
522 
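/* Register the fixed-topology clocks: d1cpre and hclk are divided down
 * from sys_ck via RCC_D1CFGR, the pclkN bus clocks via the D1/D2/D3
 * domain prescalers, plus the Cortex SysTick source (d1cpre / 8) and the
 * two timer kernel clocks.
 */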
523 static void register_core_and_bus_clocks(void)
524 {
525 	/* CORE AND BUS */
526 	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
527 			"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
528 			d1cpre_div_table, &stm32rcc_lock);
529 
530 	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
531 			CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
532 			d1cpre_div_table, &stm32rcc_lock);
533 
534 	/* D1 DOMAIN */
535 	/* * CPU Systick */
536 	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
537 			"d1cpre", 0, 1, 8);
538 
539 	/* * APB3 peripheral */
540 	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
541 			base + RCC_D1CFGR, 4, 3, 0,
542 			ppre_div_table, &stm32rcc_lock);
543 
544 	/* D2 DOMAIN */
545 	/* * APB1 peripheral */
546 	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
547 			base + RCC_D2CFGR, 4, 3, 0,
548 			ppre_div_table, &stm32rcc_lock);
549 
550 	/* Timers prescaler clocks */
551 	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0,
552 			4, &stm32rcc_lock);
553 
554 	/* * APB2 peripheral */
555 	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
556 			base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
557 			&stm32rcc_lock);
558 
559 	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
560 			&stm32rcc_lock);
561 
562 	/* D3 DOMAIN */
563 	/* * APB4 peripheral */
564 	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
565 			base + RCC_D3CFGR, 4, 3, 0,
566 			ppre_div_table, &stm32rcc_lock);
567 }
568 
569 /* MUX clock configuration */
570 struct stm32_mux_clk {
571 	const char *name;
572 	const char * const *parents;
573 	u8 num_parents;
574 	u32 offset;
575 	u8 shift;
576 	u8 width;
577 	u32 flags;
578 };
579 
580 #define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
581 {\
582 	.name		= _name,\
583 	.parents	= _parents,\
584 	.num_parents	= ARRAY_SIZE(_parents),\
585 	.offset		= _mux_offset,\
586 	.shift		= _mux_shift,\
587 	.width		= _mux_width,\
588 	.flags		= _flags,\
589 }
590 
591 #define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
592 	M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)\
593 
594 static const struct stm32_mux_clk stm32_mclk[] __initconst = {
595 	M_MCLOC("per_ck",	per_src,	RCC_D1CCIPR,	28, 3),
596 	M_MCLOC("pllsrc",	pll_src,	RCC_PLLCKSELR,	 0, 3),
597 	M_MCLOC("sys_ck",	sys_src,	RCC_CFGR,	 0, 3),
598 	M_MCLOC("tracein_ck",	tracein_src,	RCC_CFGR,	 0, 3),
599 };
600 
601 /* Oscillator clock configuration */
602 struct stm32_osc_clk {
603 	const char *name;
604 	const char *parent;
605 	u32 gate_offset;
606 	u8 bit_idx;
607 	u8 bit_rdy;
608 	u32 flags;
609 };
610 
611 #define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
612 {\
613 	.name		= _name,\
614 	.parent		= _parent,\
615 	.gate_offset	= _gate_offset,\
616 	.bit_idx	= _bit_idx,\
617 	.bit_rdy	= _bit_rdy,\
618 	.flags		= _flags,\
619 }
620 
621 #define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
622 	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
623 
624 static const struct stm32_osc_clk stm32_oclk[] __initconst = {
625 	OSC_CLKF("hsi_ck",  "hsidiv",   RCC_CR,   0,  2, CLK_IGNORE_UNUSED),
626 	OSC_CLKF("hsi_ker", "hsidiv",   RCC_CR,   1,  2, CLK_IGNORE_UNUSED),
627 	OSC_CLKF("csi_ck",  "clk-csi",  RCC_CR,   7,  8, CLK_IGNORE_UNUSED),
628 	OSC_CLKF("csi_ker", "clk-csi",  RCC_CR,   9,  8, CLK_IGNORE_UNUSED),
629 	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR,  12, 13, CLK_IGNORE_UNUSED),
630 	OSC_CLKF("lsi_ck",  "clk-lsi",  RCC_CSR,  0,  1, CLK_IGNORE_UNUSED),
631 };
632 
633 /* PLL configuration */
634 struct st32h7_pll_cfg {
635 	u8 bit_idx;
636 	u32 offset_divr;
637 	u8 bit_frac_en;
638 	u32 offset_frac;
639 	u8 divm;
640 };
641 
642 struct stm32_pll_data {
643 	const char *name;
644 	const char *parent_name;
645 	unsigned long flags;
646 	const struct st32h7_pll_cfg *cfg;
647 };
648 
649 static const struct st32h7_pll_cfg stm32h7_pll1 = {
650 	.bit_idx = 24,
651 	.offset_divr = RCC_PLL1DIVR,
652 	.bit_frac_en = 0,
653 	.offset_frac = RCC_PLL1FRACR,
654 	.divm = 4,
655 };
656 
657 static const struct st32h7_pll_cfg stm32h7_pll2 = {
658 	.bit_idx = 26,
659 	.offset_divr = RCC_PLL2DIVR,
660 	.bit_frac_en = 4,
661 	.offset_frac = RCC_PLL2FRACR,
662 	.divm = 12,
663 };
664 
665 static const struct st32h7_pll_cfg stm32h7_pll3 = {
666 	.bit_idx = 28,
667 	.offset_divr = RCC_PLL3DIVR,
668 	.bit_frac_en = 8,
669 	.offset_frac = RCC_PLL3FRACR,
670 	.divm = 20,
671 };
672 
673 static const struct stm32_pll_data stm32_pll[] = {
674 	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
675 	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
676 	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
677 };
678 
679 struct stm32_fractional_divider {
680 	void __iomem	*mreg;
681 	u8		mshift;
682 	u8		mwidth;
683 	u32		mmask;
684 
685 	void __iomem	*nreg;
686 	u8		nshift;
687 	u8		nwidth;
688 
689 	void __iomem	*freg_status;
690 	u8		freg_bit;
691 	void __iomem	*freg_value;
692 	u8		fshift;
693 	u8		fwidth;
694 
695 	u8		flags;
696 	struct clk_hw	hw;
697 	spinlock_t	*lock;
698 };
699 
700 struct stm32_pll_obj {
701 	spinlock_t *lock;
702 	struct stm32_fractional_divider div;
703 	struct stm32_ready_gate rgate;
704 	struct clk_hw hw;
705 };
706 
707 #define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
708 
709 static int pll_is_enabled(struct clk_hw *hw)
710 {
711 	struct stm32_pll_obj *clk_elem = to_pll(hw);
712 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
713 
714 	__clk_hw_set_clk(_hw, hw);
715 
716 	return ready_gate_clk_ops.is_enabled(_hw);
717 }
718 
719 static int pll_enable(struct clk_hw *hw)
720 {
721 	struct stm32_pll_obj *clk_elem = to_pll(hw);
722 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
723 
724 	__clk_hw_set_clk(_hw, hw);
725 
726 	return ready_gate_clk_ops.enable(_hw);
727 }
728 
729 static void pll_disable(struct clk_hw *hw)
730 {
731 	struct stm32_pll_obj *clk_elem = to_pll(hw);
732 	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;
733 
734 	__clk_hw_set_clk(_hw, hw);
735 
736 	ready_gate_clk_ops.disable(_hw);
737 }
738 
739 static int pll_frac_is_enabled(struct clk_hw *hw)
740 {
741 	struct stm32_pll_obj *clk_elem = to_pll(hw);
742 	struct stm32_fractional_divider *fd = &clk_elem->div;
743 
744 	return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
745 }
746 
747 static unsigned long pll_read_frac(struct clk_hw *hw)
748 {
749 	struct stm32_pll_obj *clk_elem = to_pll(hw);
750 	struct stm32_fractional_divider *fd = &clk_elem->div;
751 
752 	return (readl(fd->freg_value) >> fd->fshift) &
753 		GENMASK(fd->fwidth - 1, 0);
754 }
755 
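/* VCO rate = parent_rate * (DIVN + 1) / DIVM, plus the fractional
 * contribution parent_rate * FRACN / (DIVM * 8191) when the fractional
 * divider is enabled.
 */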
756 static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
757 		unsigned long parent_rate)
758 {
759 	struct stm32_pll_obj *clk_elem = to_pll(hw);
760 	struct stm32_fractional_divider *fd = &clk_elem->div;
761 	unsigned long m, n;
762 	u32 val, mask;
763 	u64 rate, rate1 = 0;
764 
765 	val = readl(fd->mreg);
766 	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
767 	m = (val & mask) >> fd->mshift;
768 
769 	val = readl(fd->nreg);
770 	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
771 	n = ((val & mask) >> fd->nshift) + 1;
772 
773 	if (!n || !m)
774 		return parent_rate;
775 
776 	rate = (u64)parent_rate * n;
777 	do_div(rate, m);
778 
779 	if (pll_frac_is_enabled(hw)) {
780 		val = pll_read_frac(hw);
781 		rate1 = (u64)parent_rate * (u64)val;
782 		do_div(rate1, (m * 8191));
783 	}
784 
785 	return rate + rate1;
786 }
787 
788 static const struct clk_ops pll_ops = {
789 	.enable		= pll_enable,
790 	.disable	= pll_disable,
791 	.is_enabled	= pll_is_enabled,
792 	.recalc_rate	= pll_fd_recalc_rate,
793 };
794 
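/* A PLL is modelled as a ready gate on its ON/RDY bits in RCC_CR plus a
 * fractional divider: DIVM comes from RCC_PLLCKSELR, DIVN from the PLL's
 * DIVR register, FRACN from its FRACR register, and the fractional enable
 * bit from RCC_PLLCFGR.
 */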
795 static struct clk_hw *clk_register_stm32_pll(struct device *dev,
796 		const char *name,
797 		const char *parent,
798 		unsigned long flags,
799 		const struct st32h7_pll_cfg *cfg,
800 		spinlock_t *lock)
801 {
802 	struct stm32_pll_obj *pll;
803 	struct clk_init_data init = { NULL };
804 	struct clk_hw *hw;
805 	int ret;
806 	struct stm32_fractional_divider *div = NULL;
807 	struct stm32_ready_gate *rgate;
808 
809 	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
810 	if (!pll)
811 		return ERR_PTR(-ENOMEM);
812 
813 	init.name = name;
814 	init.ops = &pll_ops;
815 	init.flags = flags;
816 	init.parent_names = &parent;
817 	init.num_parents = 1;
818 	pll->hw.init = &init;
819 
820 	hw = &pll->hw;
821 	rgate = &pll->rgate;
822 
823 	rgate->bit_rdy = cfg->bit_idx + 1;
824 	rgate->gate.lock = lock;
825 	rgate->gate.reg = base + RCC_CR;
826 	rgate->gate.bit_idx = cfg->bit_idx;
827 
828 	div = &pll->div;
829 	div->flags = 0;
830 	div->mreg = base + RCC_PLLCKSELR;
831 	div->mshift = cfg->divm;
832 	div->mwidth = 6;
833 	div->nreg = base +  cfg->offset_divr;
834 	div->nshift = 0;
835 	div->nwidth = 9;
836 
837 	div->freg_status = base + RCC_PLLCFGR;
838 	div->freg_bit = cfg->bit_frac_en;
839 	div->freg_value = base +  cfg->offset_frac;
840 	div->fshift = 3;
841 	div->fwidth = 13;
842 
843 	div->lock = lock;
844 
845 	ret = clk_hw_register(dev, hw);
846 	if (ret) {
847 		kfree(pll);
848 		hw = ERR_PTR(ret);
849 	}
850 
851 	return hw;
852 }
853 
854 /* ODF CLOCKS */
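/* The PLL output dividers are only meant to be changed while the parent
 * PLL is disabled, so the divider and gate ops below briefly disable the
 * PLL around the register write and then restore its previous state.
 */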
855 static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
856 		unsigned long parent_rate)
857 {
858 	return clk_divider_ops.recalc_rate(hw, parent_rate);
859 }
860 
861 static long odf_divider_round_rate(struct clk_hw *hw, unsigned long rate,
862 		unsigned long *prate)
863 {
864 	return clk_divider_ops.round_rate(hw, rate, prate);
865 }
866 
867 static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
868 		unsigned long parent_rate)
869 {
870 	struct clk_hw *hwp;
871 	int pll_status;
872 	int ret;
873 
874 	hwp = clk_hw_get_parent(hw);
875 
876 	pll_status = pll_is_enabled(hwp);
877 
878 	if (pll_status)
879 		pll_disable(hwp);
880 
881 	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);
882 
883 	if (pll_status)
884 		pll_enable(hwp);
885 
886 	return ret;
887 }
888 
889 static const struct clk_ops odf_divider_ops = {
890 	.recalc_rate	= odf_divider_recalc_rate,
891 	.round_rate	= odf_divider_round_rate,
892 	.set_rate	= odf_divider_set_rate,
893 };
894 
895 static int odf_gate_enable(struct clk_hw *hw)
896 {
897 	struct clk_hw *hwp;
898 	int pll_status;
899 	int ret;
900 
901 	if (clk_gate_ops.is_enabled(hw))
902 		return 0;
903 
904 	hwp = clk_hw_get_parent(hw);
905 
906 	pll_status = pll_is_enabled(hwp);
907 
908 	if (pll_status)
909 		pll_disable(hwp);
910 
911 	ret = clk_gate_ops.enable(hw);
912 
913 	if (pll_status)
914 		pll_enable(hwp);
915 
916 	return ret;
917 }
918 
919 static void odf_gate_disable(struct clk_hw *hw)
920 {
921 	struct clk_hw *hwp;
922 	int pll_status;
923 
924 	if (!clk_gate_ops.is_enabled(hw))
925 		return;
926 
927 	hwp = clk_hw_get_parent(hw);
928 
929 	pll_status = pll_is_enabled(hwp);
930 
931 	if (pll_status)
932 		pll_disable(hwp);
933 
934 	clk_gate_ops.disable(hw);
935 
936 	if (pll_status)
937 		pll_enable(hwp);
938 }
939 
940 static const struct clk_ops odf_gate_ops = {
941 	.enable		= odf_gate_enable,
942 	.disable	= odf_gate_disable,
943 	.is_enabled	= clk_gate_is_enabled,
944 };
945 
946 static struct composite_clk_gcfg odf_clk_gcfg = {
947 	M_CFG_DIV(&odf_divider_ops, 0),
948 	M_CFG_GATE(&odf_gate_ops, 0),
949 };
950 
951 #define M_ODF_F(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
952 		_rate_shift, _rate_width, _flags)\
953 {\
954 	.mux = NULL,\
955 	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
956 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
957 	.name = _name,\
958 	.parent_name = &(const char *) {_parent},\
959 	.num_parents = 1,\
960 	.flags = _flags,\
961 }
962 
963 #define M_ODF(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
964 		_rate_shift, _rate_width)\
965 M_ODF_F(_name, _parent, _gate_offset,  _bit_idx, _rate_offset,\
966 		_rate_shift, _rate_width, 0)\
967 
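/* Output divider clocks of the three PLLs: one row per PLL, one column per
 * P/Q/R output. The enable bit of each output lives in RCC_PLLCFGR and the
 * 7-bit divider field in the corresponding RCC_PLLxDIVR register.
 */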
968 static const struct composite_clk_cfg stm32_odf[3][3] = {
969 	{
970 		M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR,  9, 7,
971 				CLK_IGNORE_UNUSED),
972 		M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
973 				CLK_IGNORE_UNUSED),
974 		M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
975 				CLK_IGNORE_UNUSED),
976 	},
977 
978 	{
979 		M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR,  9, 7),
980 		M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
981 		M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
982 	},
983 	{
984 		M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR,  9, 7),
985 		M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
986 		M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
987 	}
988 };
989 
990 /* PERIF CLOCKS */
991 struct pclk_t {
992 	u32 gate_offset;
993 	u8 bit_idx;
994 	const char *name;
995 	const char *parent;
996 	u32 flags;
997 };
998 
999 #define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
1000 {\
1001 	.gate_offset	= _gate_offset,\
1002 	.bit_idx	= _bit_idx,\
1003 	.name		= _name,\
1004 	.parent		= _parent,\
1005 	.flags		= _flags,\
1006 }
1007 
1008 #define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
1009 	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
1010 
1011 static const struct pclk_t pclk[] = {
1012 	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
1013 	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
1014 	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
1015 	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
1016 	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
1017 	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
1018 	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
1019 	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
1020 	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
1021 	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
1022 	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
1023 	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
1024 	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
1025 	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
1026 	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
1027 	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
1028 	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
1029 	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
1030 	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
1031 	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
1032 	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
1033 	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
1034 	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
1035 	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
1036 	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
1037 	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
1038 	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
1039 	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
1040 	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
1041 	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
1042 	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
1043 	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
1044 	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
1045 	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
1046 	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
1047 	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
1048 	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
1049 	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
1050 	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
1051 	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
1052 	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
1053 	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
1054 	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
1055 	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
1056 	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
1057 	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
1058 	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
1059 	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
1060 	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
1061 	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
1062 	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
1063 	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
1064 	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
1065 	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
1066 	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
1067 	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
1068 	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
1069 	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
1070 	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
1071 	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
1072 	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
1073 	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
1074 };
1075 
1076 /* KERNEL CLOCKS */
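/* Kernel clocks are gate + mux composites: the gate sits in the relevant
 * RCC_xxxENR register and the parent is selected through the kernel clock
 * selection registers (RCC_D1CCIPR, RCC_D2CCIP1R/2R, RCC_D3CCIPR).
 */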
1077 #define KER_CLKF(_gate_offset, _bit_idx,\
1078 		_mux_offset, _mux_shift, _mux_width,\
1079 		_name, _parent_name,\
1080 		_flags) \
1081 { \
1082 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
1083 	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
1084 	.name = _name, \
1085 	.parent_name = _parent_name, \
1086 	.num_parents = ARRAY_SIZE(_parent_name),\
1087 	.flags = _flags,\
1088 }
1089 
1090 #define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
1091 		_name, _parent_name) \
1092 KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
1093 		_name, _parent_name, 0)\
1094 
1095 #define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
1096 		_name, _parent_name,\
1097 		_flags) \
1098 { \
1099 	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
1100 	.mux = NULL,\
1101 	.name = _name, \
1102 	.parent_name = _parent_name, \
1103 	.num_parents = 1,\
1104 	.flags = _flags,\
1105 }
1106 
1107 static const struct composite_clk_cfg kclk[] = {
1108 	KER_CLK(RCC_AHB3ENR,  16, RCC_D1CCIPR,	16, 1, "sdmmc1", sdmmc_src),
1109 	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR,	 4, 2, "quadspi", qspi_src,
1110 			CLK_IGNORE_UNUSED),
1111 	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR,	 0, 2, "fmc", fmc_src,
1112 			CLK_IGNORE_UNUSED),
1113 	KER_CLK(RCC_AHB1ENR,  27, RCC_D2CCIP2R,	20, 2, "usb2otg", usbotg_src),
1114 	KER_CLK(RCC_AHB1ENR,  25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
1115 	KER_CLK(RCC_AHB1ENR,   5, RCC_D3CCIPR,	16, 2, "adc12", adc_src),
1116 	KER_CLK(RCC_AHB2ENR,   9, RCC_D1CCIPR,	16, 1, "sdmmc2", sdmmc_src),
1117 	KER_CLK(RCC_AHB2ENR,   6, RCC_D2CCIP2R,	 8, 2, "rng", rng_src),
1118 	KER_CLK(RCC_AHB4ENR,  24, RCC_D3CCIPR,  16, 2, "adc3", adc_src),
1119 	KER_CLKF(RCC_APB3ENR,   4, RCC_D1CCIPR,	 8, 1, "dsi", dsi_src,
1120 			CLK_SET_RATE_PARENT),
1121 	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
1122 	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R,  0, 3, "usart8", usart_src2),
1123 	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R,  0, 3, "usart7", usart_src2),
1124 	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
1125 	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
1126 	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
1127 	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
1128 	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R,	 0, 3, "uart5", usart_src2),
1129 	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R,  0, 3, "uart4", usart_src2),
1130 	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R,  0, 3, "usart3", usart_src2),
1131 	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R,  0, 3, "usart2", usart_src2),
1132 	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
1133 	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
1134 	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
1135 	KER_CLK(RCC_APB1LENR,  9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
1136 	KER_CLK(RCC_APB1HENR,  8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
1137 	KER_CLK(RCC_APB1HENR,  2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
1138 	KER_CLK(RCC_APB2ENR,  29, RCC_CFGR,	14, 1, "hrtim", hrtim_src),
1139 	KER_CLK(RCC_APB2ENR,  28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
1140 	KER_CLKF(RCC_APB2ENR,  24, RCC_D2CCIP1R,  6, 3, "sai3", sai_src,
1141 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1142 	KER_CLKF(RCC_APB2ENR,  23, RCC_D2CCIP1R,  6, 3, "sai2", sai_src,
1143 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1144 	KER_CLKF(RCC_APB2ENR,  22, RCC_D2CCIP1R,  0, 3, "sai1", sai_src,
1145 		 CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
1146 	KER_CLK(RCC_APB2ENR,  20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
1147 	KER_CLK(RCC_APB2ENR,  13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
1148 	KER_CLK(RCC_APB2ENR,  12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
1149 	KER_CLK(RCC_APB2ENR,   5, RCC_D2CCIP2R,  3, 3, "usart6", usart_src1),
1150 	KER_CLK(RCC_APB2ENR,   4, RCC_D2CCIP2R,  3, 3, "usart1", usart_src1),
1151 	KER_CLK(RCC_APB4ENR,  21, RCC_D3CCIPR,	24, 3, "sai4b", sai_src),
1152 	KER_CLK(RCC_APB4ENR,  21, RCC_D3CCIPR,	21, 3, "sai4a", sai_src),
1153 	KER_CLK(RCC_APB4ENR,  12, RCC_D3CCIPR,	13, 3, "lptim5", lptim_src2),
1154 	KER_CLK(RCC_APB4ENR,  11, RCC_D3CCIPR,	13, 3, "lptim4", lptim_src2),
1155 	KER_CLK(RCC_APB4ENR,  10, RCC_D3CCIPR,	13, 3, "lptim3", lptim_src2),
1156 	KER_CLK(RCC_APB4ENR,   9, RCC_D3CCIPR,	10, 3, "lptim2", lptim_src2),
1157 	KER_CLK(RCC_APB4ENR,   7, RCC_D3CCIPR,	 8, 2, "i2c4", i2c_src2),
1158 	KER_CLK(RCC_APB4ENR,   5, RCC_D3CCIPR,	28, 3, "spi6", spi_src3),
1159 	KER_CLK(RCC_APB4ENR,   3, RCC_D3CCIPR,	 0, 3, "lpuart1", lpuart1_src),
1160 };
1161 
1162 static struct composite_clk_gcfg kernel_clk_cfg = {
1163 	M_CFG_MUX(NULL, 0),
1164 	M_CFG_GATE(NULL, 0),
1165 };
1166 
1167 /* RTC clock */
1168 /*
1169  * RTC & LSE registers are protected against parasitic write access.
1170  * PWR_CR_DBP bit must be set to enable write access to RTC registers.
1171  */
1172 /* STM32_PWR_CR */
1173 #define PWR_CR				0x00
1174 /* STM32_PWR_CR bit field */
1175 #define PWR_CR_DBP			BIT(8)
1176 
1177 static struct composite_clk_gcfg rtc_clk_cfg = {
1178 	M_CFG_MUX(NULL, 0),
1179 	M_CFG_GATE(NULL, 0),
1180 };
1181 
1182 static const struct composite_clk_cfg rtc_clk =
1183 	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
1184 
1185 /* Micro-controller output clock */
1186 static struct composite_clk_gcfg mco_clk_cfg = {
1187 	M_CFG_MUX(NULL, 0),
1188 	M_CFG_DIV(NULL,	CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
1189 };
1190 
1191 #define M_MCO_F(_name, _parents, _mux_offset,  _mux_shift, _mux_width,\
1192 		_rate_offset, _rate_shift, _rate_width,\
1193 		_flags)\
1194 {\
1195 	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
1196 	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
1197 	.gate = NULL,\
1198 	.name = _name,\
1199 	.parent_name = _parents,\
1200 	.num_parents = ARRAY_SIZE(_parents),\
1201 	.flags = _flags,\
1202 }
1203 
1204 static const struct composite_clk_cfg mco_clk[] = {
1205 	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
1206 	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
1207 };
1208 
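/* Probe-time setup: map the RCC registers, lift the backup domain write
 * protection through the PWR syscon, then register the internal
 * oscillators, system muxes, core/bus dividers, ready-gated oscillators,
 * PLLs with their output dividers, peripheral gates, kernel clocks, the
 * RTC and MCO clocks, and finally expose them all via a one-cell provider.
 */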
1209 static void __init stm32h7_rcc_init(struct device_node *np)
1210 {
1211 	struct clk_hw_onecell_data *clk_data;
1212 	struct composite_cfg c_cfg;
1213 	int n;
1214 	const char *hse_clk, *lse_clk, *i2s_clk;
1215 	struct regmap *pdrm;
1216 
1217 	clk_data = kzalloc(sizeof(*clk_data) +
1218 			sizeof(*clk_data->hws) * STM32H7_MAX_CLKS,
1219 			GFP_KERNEL);
1220 	if (!clk_data)
1221 		return;
1222 
1223 	clk_data->num = STM32H7_MAX_CLKS;
1224 
1225 	hws = clk_data->hws;
1226 
1227 	for (n = 0; n < STM32H7_MAX_CLKS; n++)
1228 		hws[n] = ERR_PTR(-ENOENT);
1229 
1230 	/* Get the RCC base address from DT */
1231 	base = of_iomap(np, 0);
1232 	if (!base) {
1233 		pr_err("%s: unable to map resource\n", np->name);
1234 		goto err_free_clks;
1235 	}
1236 
1237 	pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
1238 	if (IS_ERR(pdrm))
1239 		pr_warn("%s: Unable to get syscfg\n", __func__);
1240 	else
1241 		/* Unconditionally disable backup domain write protection;
1242 		 * it is never re-enabled afterwards.
1243 		 * Needed by the LSE & RTC clocks.
1244 		 */
1245 		regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);
1246 
1247 	/* Get the parent names from DT */
1248 	hse_clk = of_clk_get_parent_name(np, 0);
1249 	lse_clk = of_clk_get_parent_name(np, 1);
1250 	i2s_clk = of_clk_get_parent_name(np, 2);
1251 
1252 	sai_src[3] = i2s_clk;
1253 	spi_src1[3] = i2s_clk;
1254 
1255 	/* Register Internal oscillators */
1256 	clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
1257 	clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
1258 	clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
1259 	clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000000);
1260 
1261 	/* This clock comes from outside the SoC; its frequency is unknown */
1262 	hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
1263 			0, 0);
1264 
1265 	hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
1266 			base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
1267 			&stm32rcc_lock);
1268 
1269 	hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck",	0,
1270 			base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
1271 			CLK_DIVIDER_ALLOW_ZERO,
1272 			&stm32rcc_lock);
1273 
1274 	/* Mux system clocks */
1275 	for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
1276 		hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
1277 				stm32_mclk[n].name,
1278 				stm32_mclk[n].parents,
1279 				stm32_mclk[n].num_parents,
1280 				stm32_mclk[n].flags,
1281 				stm32_mclk[n].offset + base,
1282 				stm32_mclk[n].shift,
1283 				stm32_mclk[n].width,
1284 				0,
1285 				&stm32rcc_lock);
1286 
1287 	register_core_and_bus_clocks();
1288 
1289 	/* Oscillator clocks */
1290 	for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
1291 		hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
1292 				stm32_oclk[n].name,
1293 				stm32_oclk[n].parent,
1294 				stm32_oclk[n].gate_offset + base,
1295 				stm32_oclk[n].bit_idx,
1296 				stm32_oclk[n].bit_rdy,
1297 				stm32_oclk[n].flags,
1298 				&stm32rcc_lock);
1299 
1300 	hws[HSE_CK] = clk_register_ready_gate(NULL,
1301 				"hse_ck",
1302 				hse_clk,
1303 				RCC_CR + base,
1304 				16, 17,
1305 				0,
1306 				&stm32rcc_lock);
1307 
1308 	hws[LSE_CK] = clk_register_ready_gate(NULL,
1309 				"lse_ck",
1310 				lse_clk,
1311 				RCC_BDCR + base,
1312 				0, 1,
1313 				0,
1314 				&stm32rcc_lock);
1315 
1316 	hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
1317 			"csi_ker_div122", "csi_ker", 0, 1, 122);
1318 
1319 	/* PLLs */
1320 	for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
1321 		int odf;
1322 
1323 		/* Register the VCO */
1324 		clk_register_stm32_pll(NULL, stm32_pll[n].name,
1325 				stm32_pll[n].parent_name, stm32_pll[n].flags,
1326 				stm32_pll[n].cfg,
1327 				&stm32rcc_lock);
1328 
1329 		/* Register the 3 output dividers */
1330 		for (odf = 0; odf < 3; odf++) {
1331 			int idx = n * 3 + odf;
1332 
1333 			get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
1334 					&c_cfg,	&stm32rcc_lock);
1335 
1336 			hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
1337 					stm32_odf[n][odf].name,
1338 					stm32_odf[n][odf].parent_name,
1339 					stm32_odf[n][odf].num_parents,
1340 					c_cfg.mux_hw, c_cfg.mux_ops,
1341 					c_cfg.div_hw, c_cfg.div_ops,
1342 					c_cfg.gate_hw, c_cfg.gate_ops,
1343 					stm32_odf[n][odf].flags);
1344 		}
1345 	}
1346 
1347 	/* Peripheral clocks */
1348 	for (n = 0; n < ARRAY_SIZE(pclk); n++)
1349 		hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
1350 				pclk[n].parent,
1351 				pclk[n].flags, base + pclk[n].gate_offset,
1352 				pclk[n].bit_idx, 0, &stm32rcc_lock);
1353 
1354 	/* Kernel clocks */
1355 	for (n = 0; n < ARRAY_SIZE(kclk); n++) {
1356 		get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
1357 				&stm32rcc_lock);
1358 
1359 		hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
1360 				kclk[n].name,
1361 				kclk[n].parent_name,
1362 				kclk[n].num_parents,
1363 				c_cfg.mux_hw, c_cfg.mux_ops,
1364 				c_cfg.div_hw, c_cfg.div_ops,
1365 				c_cfg.gate_hw, c_cfg.gate_ops,
1366 				kclk[n].flags);
1367 	}
1368 
1369 	/* RTC clock (default state is off) */
1370 	clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);
1371 
1372 	get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);
1373 
1374 	hws[RTC_CK] = clk_hw_register_composite(NULL,
1375 			rtc_clk.name,
1376 			rtc_clk.parent_name,
1377 			rtc_clk.num_parents,
1378 			c_cfg.mux_hw, c_cfg.mux_ops,
1379 			c_cfg.div_hw, c_cfg.div_ops,
1380 			c_cfg.gate_hw, c_cfg.gate_ops,
1381 			rtc_clk.flags);
1382 
1383 	/* Micro-controller clocks */
1384 	for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
1385 		get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
1386 				&stm32rcc_lock);
1387 
1388 		hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
1389 				mco_clk[n].name,
1390 				mco_clk[n].parent_name,
1391 				mco_clk[n].num_parents,
1392 				c_cfg.mux_hw, c_cfg.mux_ops,
1393 				c_cfg.div_hw, c_cfg.div_ops,
1394 				c_cfg.gate_hw, c_cfg.gate_ops,
1395 				mco_clk[n].flags);
1396 	}
1397 
1398 	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
1399 
1400 	return;
1401 
1402 err_free_clks:
1403 	kfree(clk_data);
1404 }
1405 
1406 /* The RCC node is a clock and reset controller, and these
1407  * functionalities are supported by different drivers that
1408  * match the same compatible string.
1409  */
1410 CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);
1411