/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <asm/div64.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regmap.h>
#include "clk.h"

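/*
 * Values of the PLL mode mux field: slow mode selects the xin24m
 * reference, normal mode the PLL output and deep-slow mode the xin32k
 * clock, matching the parent order set up in rockchip_clk_register_pll()
 * below.
 */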
#define PLL_MODE_MASK		0x3
#define PLL_MODE_SLOW		0x0
#define PLL_MODE_NORM		0x1
#define PLL_MODE_DEEP		0x2

struct rockchip_clk_pll {
	struct clk_hw		hw;

	struct clk_mux		pll_mux;
	const struct clk_ops	*pll_mux_ops;

	struct notifier_block	clk_nb;
	bool			rate_change_remuxed;

	void __iomem		*reg_base;
	int			lock_offset;
	unsigned int		lock_shift;
	enum rockchip_pll_type	type;
	const struct rockchip_pll_rate_table *rate_table;
	unsigned int		rate_count;
	spinlock_t		*lock;
};

#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
#define to_rockchip_clk_pll_nb(nb) \
			container_of(nb, struct rockchip_clk_pll, clk_nb)

static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
			    struct rockchip_clk_pll *pll, unsigned long rate)
{
	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
	int i;

	for (i = 0; i < pll->rate_count; i++) {
		if (rate == rate_table[i].rate)
			return &rate_table[i];
	}

	return NULL;
}

static long rockchip_pll_round_rate(struct clk_hw *hw,
			    unsigned long drate, unsigned long *prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
	int i;

	/* Assuming rate_table is in descending order */
	for (i = 0; i < pll->rate_count; i++) {
		if (drate >= rate_table[i].rate)
			return rate_table[i].rate;
	}

	/* return minimum supported value */
	return rate_table[i - 1].rate;
}

/*
 * Wait for the pll to reach the locked state.
 * The calling set_rate function is responsible for making sure the
 * grf regmap is available.
 */
static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
{
	struct regmap *grf = rockchip_clk_get_grf();
	unsigned int val;
	int delay = 24000000, ret;

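	/*
	 * The lock bit lives in the GRF, so it is polled through the grf
	 * regmap. Note that delay is a plain busy-loop iteration count,
	 * not a time; the loop exits as soon as the lock bit is set.
	 */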
	while (delay > 0) {
		ret = regmap_read(grf, pll->lock_offset, &val);
		if (ret) {
			pr_err("%s: failed to read pll lock status: %d\n",
			       __func__, ret);
			return ret;
		}

		if (val & BIT(pll->lock_shift))
			return 0;
		delay--;
	}

	pr_err("%s: timeout waiting for pll to lock\n", __func__);
	return -ETIMEDOUT;
}

/*
 * Set pll mux when changing the pll rate.
 * This makes sure to move the pll mux away from the actual pll before
 * changing its rate and back to the original parent after the change.
 */
static int rockchip_pll_notifier_cb(struct notifier_block *nb,
					unsigned long event, void *data)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll_nb(nb);
	struct clk_mux *pll_mux = &pll->pll_mux;
	const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
	int cur_parent;

	switch (event) {
	case PRE_RATE_CHANGE:
		cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
		if (cur_parent == PLL_MODE_NORM) {
			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
			pll->rate_change_remuxed = 1;
		}
		break;
	case POST_RATE_CHANGE:
		if (pll->rate_change_remuxed) {
			pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
			pll->rate_change_remuxed = 0;
		}
		break;
	}

	return NOTIFY_OK;
}

/*
 * PLL used in RK3066, RK3188 and RK3288
 */

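/*
 * The pll is held in reset for a while after NR changes; the macro below
 * yields that time in microseconds (as passed to udelay()), which works
 * out to roughly nr * 500 cycles of the 24 MHz reference clock, rounded up.
 */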
#define RK3066_PLL_RESET_DELAY(nr)	(((nr) * 500) / 24 + 1)

#define RK3066_PLLCON(i)		((i) * 0x4)
#define RK3066_PLLCON0_OD_MASK		0xf
#define RK3066_PLLCON0_OD_SHIFT		0
#define RK3066_PLLCON0_NR_MASK		0x3f
#define RK3066_PLLCON0_NR_SHIFT		8
#define RK3066_PLLCON1_NF_MASK		0x1fff
#define RK3066_PLLCON1_NF_SHIFT		0
#define RK3066_PLLCON2_BWADJ_MASK	0xfff
#define RK3066_PLLCON2_BWADJ_SHIFT	0
#define RK3066_PLLCON3_RESET		(1 << 5)
#define RK3066_PLLCON3_PWRDOWN		(1 << 1)
#define RK3066_PLLCON3_BYPASS		(1 << 0)

static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
						     unsigned long prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	u64 nf, nr, no, rate64 = prate;
	u32 pllcon;

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
	if (pllcon & RK3066_PLLCON3_BYPASS) {
		pr_debug("%s: pll %s is bypassed\n", __func__,
			__clk_get_name(hw->clk));
		return prate;
	}

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
	nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;

	pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
	nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
	no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;

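	/* FOUT = FIN * (NF + 1) / ((NR + 1) * (NO + 1)) */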
	rate64 *= (nf + 1);
	do_div(rate64, nr + 1);
	do_div(rate64, no + 1);

	return (unsigned long)rate64;
}

static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
					unsigned long prate)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	const struct rockchip_pll_rate_table *rate;
	unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
	struct regmap *grf = rockchip_clk_get_grf();
	int ret;

	if (IS_ERR(grf)) {
		pr_debug("%s: grf regmap not available, aborting rate change\n",
			 __func__);
		return PTR_ERR(grf);
	}

	pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
		 __func__, __clk_get_name(hw->clk), old_rate, drate, prate);

	/* Get required rate settings from table */
	rate = rockchip_get_pll_settings(pll, drate);
	if (!rate) {
		pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
			drate, __clk_get_name(hw->clk));
		return -EINVAL;
	}

	pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
		 __func__, rate->rate, rate->nr, rate->no, rate->nf);

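	/*
	 * The PLLCON registers use the Rockchip "hiword mask" scheme: the
	 * upper 16 bits of each write select which of the lower 16 bits
	 * actually get updated, so no read-modify-write is needed.
	 */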
	/* enter reset mode */
	writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));

	/* update pll values */
	writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
					   RK3066_PLLCON0_NR_SHIFT) |
	       HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
					   RK3066_PLLCON0_OD_SHIFT),
	       pll->reg_base + RK3066_PLLCON(0));

	writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
						   RK3066_PLLCON1_NF_SHIFT),
		       pll->reg_base + RK3066_PLLCON(1));
	writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
						  RK3066_PLLCON2_BWADJ_SHIFT),
		       pll->reg_base + RK3066_PLLCON(2));

	/* leave reset and wait the reset_delay */
	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
	       pll->reg_base + RK3066_PLLCON(3));
	udelay(RK3066_PLL_RESET_DELAY(rate->nr));

	/* wait for the pll to lock */
	ret = rockchip_pll_wait_lock(pll);
	if (ret) {
		pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
			__func__, old_rate);
		rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
	}

	return ret;
}

static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);

	writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
	       pll->reg_base + RK3066_PLLCON(3));

	return 0;
}

static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);

	writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
			     RK3066_PLLCON3_PWRDOWN, 0),
	       pll->reg_base + RK3066_PLLCON(3));
}

static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
{
	struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
	u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));

	return !(pllcon & RK3066_PLLCON3_PWRDOWN);
}

static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
};

static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
	.recalc_rate = rockchip_rk3066_pll_recalc_rate,
	.round_rate = rockchip_pll_round_rate,
	.set_rate = rockchip_rk3066_pll_set_rate,
	.enable = rockchip_rk3066_pll_enable,
	.disable = rockchip_rk3066_pll_disable,
	.is_enabled = rockchip_rk3066_pll_is_enabled,
};

/*
 * Common registering of pll clocks
 */

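/*
 * A minimal usage sketch (offsets, variable names and the rate table below
 * are made up for illustration, not taken from a real SoC branch): a
 * platform clock init could register a pll plus its mode mux with
 *
 *	rockchip_clk_register_pll(pll_rk3066, "apll", apll_parents, 2,
 *				  reg_base, 0x0, grf_lock_offset, 6,
 *				  0x40, 0, sample_pll_rates, &clk_lock);
 *
 * The returned clock is the mode mux; the pll itself is registered
 * internally under the name "pll_apll".
 */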
struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
		const char *name, const char **parent_names, u8 num_parents,
		void __iomem *base, int con_offset, int grf_lock_offset,
		int lock_shift, int mode_offset, int mode_shift,
		struct rockchip_pll_rate_table *rate_table,
		spinlock_t *lock)
{
	const char *pll_parents[3];
	struct clk_init_data init;
	struct rockchip_clk_pll *pll;
	struct clk_mux *pll_mux;
	struct clk *pll_clk, *mux_clk;
	char pll_name[20];
	int ret;

	if (num_parents != 2) {
		pr_err("%s: needs two parent clocks\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* name the actual pll */
	snprintf(pll_name, sizeof(pll_name), "pll_%s", name);

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = pll_name;

	/* keep all plls untouched for now */
	init.flags = CLK_IGNORE_UNUSED;

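	/*
	 * The pll itself is only fed by the first parent (xin24m); the
	 * second parent is only consumed by the mode mux created below.
	 */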
	init.parent_names = &parent_names[0];
	init.num_parents = 1;

	if (rate_table) {
		int len;

		/* find count of rates in rate_table */
		for (len = 0; rate_table[len].rate != 0; )
			len++;

		pll->rate_count = len;
		pll->rate_table = kmemdup(rate_table,
					pll->rate_count *
					sizeof(struct rockchip_pll_rate_table),
					GFP_KERNEL);
		WARN(!pll->rate_table,
			"%s: could not allocate rate table for %s\n",
			__func__, name);
	}

	switch (pll_type) {
	case pll_rk3066:
		if (!pll->rate_table)
			init.ops = &rockchip_rk3066_pll_clk_norate_ops;
		else
			init.ops = &rockchip_rk3066_pll_clk_ops;
		break;
	default:
		pr_warn("%s: Unknown pll type for pll clk %s\n",
			__func__, name);
	}

	pll->hw.init = &init;
	pll->type = pll_type;
	pll->reg_base = base + con_offset;
	pll->lock_offset = grf_lock_offset;
	pll->lock_shift = lock_shift;
	pll->lock = lock;
	pll->clk_nb.notifier_call = rockchip_pll_notifier_cb;

	pll_clk = clk_register(NULL, &pll->hw);
	if (IS_ERR(pll_clk)) {
		pr_err("%s: failed to register pll clock %s : %ld\n",
			__func__, name, PTR_ERR(pll_clk));
		mux_clk = pll_clk;
		goto err_pll;
	}

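	/*
	 * Register the rate-change notifier on the pll clock itself, so the
	 * mode mux is parked on the slow source while the pll re-locks
	 * (see rockchip_pll_notifier_cb() above).
	 */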
	ret = clk_notifier_register(pll_clk, &pll->clk_nb);
	if (ret) {
		pr_err("%s: failed to register clock notifier for %s : %d\n",
				__func__, name, ret);
		mux_clk = ERR_PTR(ret);
		goto err_pll_notifier;
	}

	/* create the mux on top of the real pll */
	pll->pll_mux_ops = &clk_mux_ops;
	pll_mux = &pll->pll_mux;

	/* the actual muxing is xin24m, pll-output, xin32k */
	pll_parents[0] = parent_names[0];
	pll_parents[1] = pll_name;
	pll_parents[2] = parent_names[1];

	init.name = name;
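	/* rate requests on the mux are forwarded to the selected parent */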
	init.flags = CLK_SET_RATE_PARENT;
	init.ops = pll->pll_mux_ops;
	init.parent_names = pll_parents;
	init.num_parents = ARRAY_SIZE(pll_parents);

	pll_mux->reg = base + mode_offset;
	pll_mux->shift = mode_shift;
	pll_mux->mask = PLL_MODE_MASK;
	pll_mux->flags = 0;
	pll_mux->lock = lock;
	pll_mux->hw.init = &init;

	if (pll_type == pll_rk3066)
		pll_mux->flags |= CLK_MUX_HIWORD_MASK;

	mux_clk = clk_register(NULL, &pll_mux->hw);
	if (IS_ERR(mux_clk))
		goto err_mux;

	return mux_clk;

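	/* unwind in reverse order: notifier, pll clock, then the pll struct */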
err_mux:
	ret = clk_notifier_unregister(pll_clk, &pll->clk_nb);
	if (ret) {
		pr_err("%s: could not unregister clock notifier in error path : %d\n",
		       __func__, ret);
		return mux_clk;
	}
err_pll_notifier:
	clk_unregister(pll_clk);
err_pll:
	kfree(pll);
	return mux_clk;
}