// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright 2018 NXP.
 *
 * This driver supports the SSCG PLLs found in the i.MX8M SoCs.
 *
 * Documentation for this SSCG PLL can be found at:
 *   https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/bitfield.h>

#include "clk.h"

/* PLL CFGs */
#define PLL_CFG0		0x0
#define PLL_CFG1		0x4
#define PLL_CFG2		0x8

#define PLL_DIVF1_MASK		GENMASK(18, 13)
#define PLL_DIVF2_MASK		GENMASK(12, 7)
#define PLL_DIVR1_MASK		GENMASK(27, 25)
#define PLL_DIVR2_MASK		GENMASK(24, 19)
#define PLL_DIVQ_MASK		GENMASK(6, 1)
#define PLL_REF_MASK		GENMASK(2, 0)

#define PLL_LOCK_MASK		BIT(31)
#define PLL_PD_MASK		BIT(7)

/* These are the specification limits for the SSCG PLL */
#define PLL_REF_MIN_FREQ		25000000UL
#define PLL_REF_MAX_FREQ		235000000UL

#define PLL_STAGE1_MIN_FREQ		1600000000UL
#define PLL_STAGE1_MAX_FREQ		2400000000UL

#define PLL_STAGE1_REF_MIN_FREQ		25000000UL
#define PLL_STAGE1_REF_MAX_FREQ		54000000UL

#define PLL_STAGE2_MIN_FREQ		1200000000UL
#define PLL_STAGE2_MAX_FREQ		2400000000UL

#define PLL_STAGE2_REF_MIN_FREQ		54000000UL
#define PLL_STAGE2_REF_MAX_FREQ		75000000UL

#define PLL_OUT_MIN_FREQ		20000000UL
#define PLL_OUT_MAX_FREQ		1200000000UL

#define PLL_DIVR1_MAX			7
#define PLL_DIVR2_MAX			63
#define PLL_DIVF1_MAX			63
#define PLL_DIVF2_MAX			63
#define PLL_DIVQ_MAX			63

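/*
 * The PLL consists of two cascaded stages followed by an output divider.
 * From the divider search below (clk_sscg_divr1_lookup() and friends), the
 * programmed output frequency works out to:
 *
 *	vco1 = ref  / (divr1 + 1) * 2 * (divf1 + 1)
 *	vco2 = vco1 / (divr2 + 1) * 2 * (divf2 + 1)
 *	fout = vco2 / (2 * (divq + 1))
 *
 * Illustrative example (values picked here for clarity, not taken from a
 * particular board): a 25 MHz reference with divr1 = 0, divf1 = 35 gives
 * vco1 = 1800 MHz; divr2 = 29, divf2 = 9 gives vco2 = 1200 MHz; divq = 0
 * then yields fout = 600 MHz, with every intermediate frequency inside the
 * stage limits defined above.
 */
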
#define PLL_BYPASS_NONE			0x0
#define PLL_BYPASS1			0x2
#define PLL_BYPASS2			0x1

#define SSCG_PLL_BYPASS1_MASK		BIT(5)
#define SSCG_PLL_BYPASS2_MASK		BIT(4)
#define SSCG_PLL_BYPASS_MASK		GENMASK(5, 4)

#define PLL_SCCG_LOCK_TIMEOUT		70

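/*
 * One candidate divider configuration found by the search below: the five
 * divider fields, the bypass mode to program, the reference and intermediate
 * VCO frequencies, the resulting output frequency and how far it is from the
 * requested rate (fout_error), so that the closest candidate can be kept.
 */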
struct clk_sscg_pll_setup {
	int divr1, divf1;
	int divr2, divf2;
	int divq;
	int bypass;
	uint64_t vco1;
	uint64_t vco2;
	uint64_t fout;
	uint64_t ref;
	uint64_t ref_div1;
	uint64_t ref_div2;
	uint64_t fout_request;
	int fout_error;
};

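/*
 * Driver private data: the clk_hw, the MMIO base of the PLL_CFGx registers,
 * the setup computed by the last rate request, and the mux indices reported
 * as parent when the PLL runs normally (parent), with stage 1 bypassed
 * (bypass1) or with the PLL fully bypassed (bypass2).
 */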
struct clk_sscg_pll {
	struct clk_hw	hw;
	const struct clk_ops  ops;
	void __iomem *base;
	struct clk_sscg_pll_setup setup;
	u8 parent;
	u8 bypass1;
	u8 bypass2;
};

#define to_clk_sscg_pll(_hw) container_of(_hw, struct clk_sscg_pll, hw)

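/*
 * Poll the LOCK bit in PLL_CFG0 until the PLL reports lock, unless the PLL
 * output is taken straight from the reference (BYPASS2 set), in which case
 * there is nothing to wait for.
 */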
static int clk_sscg_pll_wait_lock(struct clk_sscg_pll *pll)
{
	u32 val;

	val = readl_relaxed(pll->base + PLL_CFG0);

	/* don't wait for lock if the PLL output is bypassed (BYPASS2 set) */
	if (!(val & SSCG_PLL_BYPASS2_MASK))
		return readl_poll_timeout(pll->base + PLL_CFG0, val,
					  val & PLL_LOCK_MASK, 0,
					  PLL_SCCG_LOCK_TIMEOUT);

	return 0;
}

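/*
 * The *_lookup() helpers below brute-force the divider space. They are
 * written innermost-first; the nesting order from the outside in is
 * divr1 -> divf1 -> divr2 -> divf2 -> divq. Every candidate that satisfies
 * the stage limits is compared against the request here, the closest match
 * so far is copied into *setup, and the search stops early on an exact hit.
 */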
static int clk_sscg_pll2_check_match(struct clk_sscg_pll_setup *setup,
					struct clk_sscg_pll_setup *temp_setup)
{
	int new_diff = temp_setup->fout - temp_setup->fout_request;
	int diff = temp_setup->fout_error;

	if (abs(diff) > abs(new_diff)) {
		temp_setup->fout_error = new_diff;
		memcpy(setup, temp_setup, sizeof(struct clk_sscg_pll_setup));

		if (temp_setup->fout_request == temp_setup->fout)
			return 0;
	}
	return -EINVAL;
}

static int clk_sscg_divq_lookup(struct clk_sscg_pll_setup *setup,
				struct clk_sscg_pll_setup *temp_setup)
{
	int ret = -EINVAL;

	for (temp_setup->divq = 0; temp_setup->divq <= PLL_DIVQ_MAX;
	     temp_setup->divq++) {
		temp_setup->vco2 = temp_setup->vco1;
		do_div(temp_setup->vco2, temp_setup->divr2 + 1);
		temp_setup->vco2 *= 2;
		temp_setup->vco2 *= temp_setup->divf2 + 1;
		if (temp_setup->vco2 >= PLL_STAGE2_MIN_FREQ &&
				temp_setup->vco2 <= PLL_STAGE2_MAX_FREQ) {
			temp_setup->fout = temp_setup->vco2;
			do_div(temp_setup->fout, 2 * (temp_setup->divq + 1));

			ret = clk_sscg_pll2_check_match(setup, temp_setup);
			if (!ret) {
				temp_setup->bypass = PLL_BYPASS1;
				return ret;
			}
		}
	}

	return ret;
}

static int clk_sscg_divf2_lookup(struct clk_sscg_pll_setup *setup,
					struct clk_sscg_pll_setup *temp_setup)
{
	int ret = -EINVAL;

	for (temp_setup->divf2 = 0; temp_setup->divf2 <= PLL_DIVF2_MAX;
	     temp_setup->divf2++) {
		ret = clk_sscg_divq_lookup(setup, temp_setup);
		if (!ret)
			return ret;
	}

	return ret;
}

static int clk_sscg_divr2_lookup(struct clk_sscg_pll_setup *setup,
				struct clk_sscg_pll_setup *temp_setup)
{
	int ret = -EINVAL;

	for (temp_setup->divr2 = 0; temp_setup->divr2 <= PLL_DIVR2_MAX;
	     temp_setup->divr2++) {
		temp_setup->ref_div2 = temp_setup->vco1;
		do_div(temp_setup->ref_div2, temp_setup->divr2 + 1);
		if (temp_setup->ref_div2 >= PLL_STAGE2_REF_MIN_FREQ &&
		    temp_setup->ref_div2 <= PLL_STAGE2_REF_MAX_FREQ) {
			ret = clk_sscg_divf2_lookup(setup, temp_setup);
			if (!ret)
				return ret;
		}
	}

	return ret;
}

static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup,
					struct clk_sscg_pll_setup *temp_setup,
					uint64_t ref)
{
	int ret;

	if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ)
		return -EINVAL;

	temp_setup->vco1 = ref;

	ret = clk_sscg_divr2_lookup(setup, temp_setup);
	return ret;
}

static int clk_sscg_divf1_lookup(struct clk_sscg_pll_setup *setup,
				struct clk_sscg_pll_setup *temp_setup)
{
	int ret = -EINVAL;

	for (temp_setup->divf1 = 0; temp_setup->divf1 <= PLL_DIVF1_MAX;
	     temp_setup->divf1++) {
		uint64_t vco1 = temp_setup->ref;

		do_div(vco1, temp_setup->divr1 + 1);
		vco1 *= 2;
		vco1 *= temp_setup->divf1 + 1;

		ret = clk_sscg_pll2_find_setup(setup, temp_setup, vco1);
		if (!ret) {
			temp_setup->bypass = PLL_BYPASS_NONE;
			return ret;
		}
	}

	return ret;
}

static int clk_sscg_divr1_lookup(struct clk_sscg_pll_setup *setup,
				struct clk_sscg_pll_setup *temp_setup)
{
	int ret = -EINVAL;

	for (temp_setup->divr1 = 0; temp_setup->divr1 <= PLL_DIVR1_MAX;
	     temp_setup->divr1++) {
		temp_setup->ref_div1 = temp_setup->ref;
		do_div(temp_setup->ref_div1, temp_setup->divr1 + 1);
		if (temp_setup->ref_div1 >= PLL_STAGE1_REF_MIN_FREQ &&
		    temp_setup->ref_div1 <= PLL_STAGE1_REF_MAX_FREQ) {
			ret = clk_sscg_divf1_lookup(setup, temp_setup);
			if (!ret)
				return ret;
		}
	}

	return ret;
}

static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup,
					struct clk_sscg_pll_setup *temp_setup,
					uint64_t ref)
{
	int ret;

	if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ)
		return -EINVAL;

	temp_setup->ref = ref;

	ret = clk_sscg_divr1_lookup(setup, temp_setup);

	return ret;
}

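/*
 * Find a divider setup for the requested rate under one specific bypass
 * mode: PLL_BYPASS2 passes the parent rate through unchanged (exact match
 * only), PLL_BYPASS1 feeds the parent into stage 2 only, and
 * PLL_BYPASS_NONE uses the full two-stage PLL.
 */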
static int clk_sscg_pll_find_setup(struct clk_sscg_pll_setup *setup,
					uint64_t prate,
					uint64_t rate, int try_bypass)
{
	struct clk_sscg_pll_setup temp_setup;
	int ret = -EINVAL;

	memset(&temp_setup, 0, sizeof(struct clk_sscg_pll_setup));
	memset(setup, 0, sizeof(struct clk_sscg_pll_setup));

	temp_setup.fout_error = PLL_OUT_MAX_FREQ;
	temp_setup.fout_request = rate;

	switch (try_bypass) {
	case PLL_BYPASS2:
		if (prate == rate) {
			setup->bypass = PLL_BYPASS2;
			setup->fout = rate;
			ret = 0;
		}
		break;
	case PLL_BYPASS1:
		ret = clk_sscg_pll2_find_setup(setup, &temp_setup, prate);
		break;
	case PLL_BYPASS_NONE:
		ret = clk_sscg_pll1_find_setup(setup, &temp_setup, prate);
		break;
	}

	return ret;
}

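/* The prepare/unprepare/is_prepared ops control the power-down (PD) bit. */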
static int clk_sscg_pll_is_prepared(struct clk_hw *hw)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);

	u32 val = readl_relaxed(pll->base + PLL_CFG0);

	return (val & PLL_PD_MASK) ? 0 : 1;
}

static int clk_sscg_pll_prepare(struct clk_hw *hw)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	u32 val;

	val = readl_relaxed(pll->base + PLL_CFG0);
	val &= ~PLL_PD_MASK;
	writel_relaxed(val, pll->base + PLL_CFG0);

	return clk_sscg_pll_wait_lock(pll);
}

static void clk_sscg_pll_unprepare(struct clk_hw *hw)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	u32 val;

	val = readl_relaxed(pll->base + PLL_CFG0);
	val |= PLL_PD_MASK;
	writel_relaxed(val, pll->base + PLL_CFG0);
}

static unsigned long clk_sscg_pll_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	u32 val, divr1, divf1, divr2, divf2, divq;
	u64 temp64;

	val = readl_relaxed(pll->base + PLL_CFG2);
	divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
	divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
	divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
	divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
	divq = FIELD_GET(PLL_DIVQ_MASK, val);

	temp64 = parent_rate;

	val = readl(pll->base + PLL_CFG0);
	if (val & SSCG_PLL_BYPASS2_MASK) {
		temp64 = parent_rate;
	} else if (val & SSCG_PLL_BYPASS1_MASK) {
		/*
		 * stage 1 bypassed, matching clk_sscg_divq_lookup():
		 * fout = ref * (divf2 + 1) / ((divr2 + 1) * (divq + 1))
		 */
		temp64 *= divf2 + 1;
		do_div(temp64, (divr2 + 1) * (divq + 1));
	} else {
		temp64 *= 2;
		temp64 *= (divf1 + 1) * (divf2 + 1);
		do_div(temp64, (divr1 + 1) * (divr2 + 1) * (divq + 1));
	}

	return temp64;
}

static int clk_sscg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	struct clk_sscg_pll_setup *setup = &pll->setup;
	u32 val;

	/*
	 * Program the bypass bits here as well, since set_parent() is not
	 * called when the parent stays the same.
	 */
	val = readl(pll->base + PLL_CFG0);
	val &= ~SSCG_PLL_BYPASS_MASK;
	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
	writel(val, pll->base + PLL_CFG0);

	val = readl_relaxed(pll->base + PLL_CFG2);
	val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
	val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
	val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
	val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
	val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
	val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
	val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
	writel_relaxed(val, pll->base + PLL_CFG2);

	return clk_sscg_pll_wait_lock(pll);
}

static u8 clk_sscg_pll_get_parent(struct clk_hw *hw)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	u32 val;
	u8 ret = pll->parent;

	val = readl(pll->base + PLL_CFG0);
	if (val & SSCG_PLL_BYPASS2_MASK)
		ret = pll->bypass2;
	else if (val & SSCG_PLL_BYPASS1_MASK)
		ret = pll->bypass1;
	return ret;
}

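/*
 * Reparenting is expressed through the bypass bits rather than a separate
 * mux register, so the index passed in by the framework is not used
 * directly; the bypass mode chosen by the last determine_rate() call
 * selects the effective parent.
 */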
static int clk_sscg_pll_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	u32 val;

	val = readl(pll->base + PLL_CFG0);
	val &= ~SSCG_PLL_BYPASS_MASK;
	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
	writel(val, pll->base + PLL_CFG0);

	return clk_sscg_pll_wait_lock(pll);
}

static int __clk_sscg_pll_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req,
					uint64_t min,
					uint64_t max,
					uint64_t rate,
					int bypass)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	struct clk_sscg_pll_setup *setup = &pll->setup;
	struct clk_hw *parent_hw = NULL;
	int bypass_parent_index;
	int ret;

	req->max_rate = max;
	req->min_rate = min;

	switch (bypass) {
	case PLL_BYPASS2:
		bypass_parent_index = pll->bypass2;
		break;
	case PLL_BYPASS1:
		bypass_parent_index = pll->bypass1;
		break;
	default:
		bypass_parent_index = pll->parent;
		break;
	}

	parent_hw = clk_hw_get_parent_by_index(hw, bypass_parent_index);
	ret = __clk_determine_rate(parent_hw, req);
	if (!ret) {
		ret = clk_sscg_pll_find_setup(setup, req->rate,
						rate, bypass);
	}

	req->best_parent_hw = parent_hw;
	req->best_parent_rate = req->rate;
	req->rate = setup->fout;

	return ret;
}

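/*
 * Try the cheapest option first: pass the parent rate straight through
 * (BYPASS2, exact match only), then stage 2 only (BYPASS1), then the full
 * two-stage PLL. If no exact match is found, accept the best approximation
 * as long as it lies within the requested min/max window.
 */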
static int clk_sscg_pll_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_sscg_pll *pll = to_clk_sscg_pll(hw);
	struct clk_sscg_pll_setup *setup = &pll->setup;
	uint64_t rate = req->rate;
	uint64_t min = req->min_rate;
	uint64_t max = req->max_rate;
	int ret;

	if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ)
		return -EINVAL;

	ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate,
						rate, PLL_BYPASS2);
	if (!ret)
		return ret;

	ret = __clk_sscg_pll_determine_rate(hw, req, PLL_STAGE1_REF_MIN_FREQ,
						PLL_STAGE1_REF_MAX_FREQ, rate,
						PLL_BYPASS1);
	if (!ret)
		return ret;

	ret = __clk_sscg_pll_determine_rate(hw, req, PLL_REF_MIN_FREQ,
						PLL_REF_MAX_FREQ, rate,
						PLL_BYPASS_NONE);
	if (!ret)
		return ret;

	if (setup->fout >= min && setup->fout <= max)
		ret = 0;

	return ret;
}

static const struct clk_ops clk_sscg_pll_ops = {
	.prepare	= clk_sscg_pll_prepare,
	.unprepare	= clk_sscg_pll_unprepare,
	.is_prepared	= clk_sscg_pll_is_prepared,
	.recalc_rate	= clk_sscg_pll_recalc_rate,
	.set_rate	= clk_sscg_pll_set_rate,
	.set_parent	= clk_sscg_pll_set_parent,
	.get_parent	= clk_sscg_pll_get_parent,
	.determine_rate	= clk_sscg_pll_determine_rate,
};

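/*
 * Register an SSCG PLL clock. @parent, @bypass1 and @bypass2 are indices
 * into @parent_names, selecting which parent is reported for normal
 * operation, stage-1 bypass and full bypass respectively; @base is the
 * MMIO address of the PLL's CFG register block.
 */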
struct clk_hw *imx_clk_hw_sscg_pll(const char *name,
				const char * const *parent_names,
				u8 num_parents,
				u8 parent, u8 bypass1, u8 bypass2,
				void __iomem *base,
				unsigned long flags)
{
	struct clk_sscg_pll *pll;
	struct clk_init_data init;
	struct clk_hw *hw;
	int ret;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->parent = parent;
	pll->bypass1 = bypass1;
	pll->bypass2 = bypass2;

	pll->base = base;
	init.name = name;
	init.ops = &clk_sscg_pll_ops;

	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	pll->hw.init = &init;

	hw = &pll->hw;

	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(pll);
		return ERR_PTR(ret);
	}

	return hw;
}
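
/*
 * Illustrative usage sketch (not part of this driver): an i.MX8M clock
 * provider would register one of these PLLs roughly as below. The clock
 * name, parent names, register offset and flags are placeholders only.
 *
 *	static const char * const sys3_pll_out_sels[] = {
 *		"osc_25m", "sys3_pll1_ref_sel", };
 *	struct clk_hw *hw;
 *
 *	hw = imx_clk_hw_sscg_pll("sys3_pll_out", sys3_pll_out_sels,
 *				 ARRAY_SIZE(sys3_pll_out_sels),
 *				 0, 0, 0, base + 0x48, CLK_IS_CRITICAL);
 *	if (IS_ERR(hw))
 *		return PTR_ERR(hw);
 */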