/*
 * Copyright 2013 Broadcom Corporation.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * bcm235xx architecture clock framework
 */

#include <common.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <bitfield.h>
#include <asm/arch/sysmap.h>
#include <asm/kona-common/clk.h>
#include "clk-core.h"

#define CLK_WR_ACCESS_PASSWORD	0x00a5a501
#define WR_ACCESS_OFFSET	0	/* common to all clock blocks */
#define POLICY_CTL_GO		1	/* Load and refresh policy masks */
#define POLICY_CTL_GO_ATL	4	/* Active Load */

/* Helper function */
int clk_get_and_enable(char *clkstr)
{
	int ret = 0;
	struct clk *c;

	debug("%s: %s\n", __func__, clkstr);

	c = clk_get(clkstr);
	if (c) {
		ret = clk_enable(c);
		if (ret)
			return ret;
	} else {
		printf("%s: Couldn't find %s\n", __func__, clkstr);
		return -EINVAL;
	}
	return ret;
}
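
/*
 * Typical use from board or SoC init code is to enable a clock by its
 * lookup name in a single call. The clock name below is illustrative
 * only and is not defined in this file:
 *
 *	if (clk_get_and_enable("sdio1_clk"))
 *		printf("failed to enable sdio1_clk\n");
 */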

/*
 * Poll a register in a CCU's address space, returning when the
 * specified bit in that register's value reaches the desired state
 * (set or clear). Delay a microsecond after each read of the
 * register. Returns 0 on success, or -ETIMEDOUT if we gave up trying.
 */
#define CLK_GATE_DELAY_USEC 2000
static inline int wait_bit(void *base, u32 offset, u32 bit, bool want)
{
	unsigned int tries;
	u32 bit_mask = 1 << bit;

	for (tries = 0; tries < CLK_GATE_DELAY_USEC; tries++) {
		u32 val;
		bool bit_val;

		val = readl(base + offset);
		bit_val = (val & bit_mask) ? 1 : 0;
		if (bit_val == want)
			return 0;	/* success */
		udelay(1);
	}

	debug("%s: timeout on addr 0x%p, waiting for bit %d to go to %d\n",
	      __func__, base + offset, bit, want);

	return -ETIMEDOUT;
}

/* Enable a peripheral clock */
static int peri_clk_enable(struct clk *c, int enable)
{
	int ret = 0;
	u32 reg;
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	struct bcm_clk_gate *gate = &cd->gate;
	void *base = (void *)c->ccu_clk_mgr_base;

	debug("%s: %s\n", __func__, c->name);

	clk_get_rate(c);	/* Make sure rate and sel are filled in */

	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	if (enable) {
		debug("%s %s set rate %lu div %lu sel %d parent %lu\n",
		      __func__, c->name, c->rate, c->div, c->sel,
		      c->parent->rate);

		/*
		 * clkgate - only software-controllable gates are
		 * supported by U-Boot, which covers all the clocks
		 * that matter. This avoids bringing in the extra
		 * complexity of the kernel clock framework.
		 */
		if (gate_exists(gate)) {
			reg = readl(base + cd->gate.offset);
			reg |= (1 << cd->gate.en_bit);
			writel(reg, base + cd->gate.offset);
		}

		/* div and pll select */
		if (divider_exists(&cd->div)) {
			reg = readl(base + cd->div.offset);
			reg = bitfield_replace(reg, cd->div.shift,
					       cd->div.width, c->div - 1);
			writel(reg, base + cd->div.offset);
		}

		/* frequency selector */
		if (selector_exists(&cd->sel)) {
			reg = readl(base + cd->sel.offset);
			reg = bitfield_replace(reg, cd->sel.shift,
					       cd->sel.width, c->sel);
			writel(reg, base + cd->sel.offset);
		}

		/*
		 * trigger - write the trigger bit to make the new
		 * divider/selector settings take effect, then wait
		 * for the hardware to clear it again.
		 */
		if (trigger_exists(&cd->trig)) {
			writel((1 << cd->trig.bit), base + cd->trig.offset);

			/* wait for trigger status bit to go to 0 */
			ret = wait_bit(base, cd->trig.offset, cd->trig.bit, 0);
			if (ret)
				return ret;
		}

		/* wait for running (status_bit = 1) */
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 1);
		if (ret)
			return ret;
	} else {
		debug("%s disable clock %s\n", __func__, c->name);

		/* clkgate */
		reg = readl(base + cd->gate.offset);
		reg &= ~(1 << cd->gate.en_bit);
		writel(reg, base + cd->gate.offset);

		/* wait for stop (status_bit = 0) */
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit, 0);
	}

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Set the rate of a peripheral clock */
static int peri_clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret = 0;
	int i;
	unsigned long diff;
	unsigned long new_rate = 0, div = 1;
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	const char **clock;

	debug("%s: %s\n", __func__, c->name);
	diff = rate;

	i = 0;
	for (clock = cd->clocks; *clock; clock++, i++) {
		struct refclk *ref = refclk_str_to_clk(*clock);

		if (!ref) {
			printf("%s: Lookup of %s failed\n", __func__, *clock);
			return -EINVAL;
		}

		/* round to the new rate */
		div = ref->clk.rate / rate;
		if (div == 0)
			div = 1;

		new_rate = ref->clk.rate / div;

		/* get the min diff */
		if (abs(new_rate - rate) < diff) {
			diff = abs(new_rate - rate);
			c->sel = i;
			c->parent = &ref->clk;
			c->rate = new_rate;
			c->div = div;
		}
	}

	debug("%s %s set rate %lu div %lu sel %d parent %lu\n", __func__,
	      c->name, c->rate, c->div, c->sel, c->parent->rate);
	return ret;
}
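
/*
 * As an illustration of the selection loop above (the reference clock
 * rates here are made up, not taken from this file): asking for 48 MHz
 * with candidate parents of 26 MHz, 96 MHz and 312 MHz gives integer
 * dividers of 1 (clamped up from 0), 2 and 6, i.e. 26 MHz, 48 MHz and
 * 52 MHz. The 96 MHz parent with div = 2 has the smallest error, so
 * sel, parent, div and rate are taken from that candidate.
 */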

/* Get the rate of a peripheral clock */
static unsigned long peri_clk_get_rate(struct clk *c)
{
	struct peri_clock *peri_clk = to_peri_clk(c);
	struct peri_clk_data *cd = peri_clk->data;
	void *base = (void *)c->ccu_clk_mgr_base;
	int div = 1;
	const char **clock;
	struct refclk *ref;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);
	if (selector_exists(&cd->sel)) {
		reg = readl(base + cd->sel.offset);
		c->sel = bitfield_extract(reg, cd->sel.shift, cd->sel.width);
	} else {
		/*
		 * For peri clocks that don't have a selector, the single
		 * reference clock will always exist at index 0.
		 */
		c->sel = 0;
	}

	if (divider_exists(&cd->div)) {
		reg = readl(base + cd->div.offset);
		div = bitfield_extract(reg, cd->div.shift, cd->div.width);
		div += 1;
	}

	clock = cd->clocks;
	ref = refclk_str_to_clk(clock[c->sel]);
	if (!ref) {
		printf("%s: Can't lookup %s\n", __func__, clock[c->sel]);
		return 0;
	}

	c->parent = &ref->clk;
	c->div = div;
	c->rate = c->parent->rate / c->div;
	debug("%s parent rate %lu div %d sel %d rate %lu\n", __func__,
	      c->parent->rate, div, c->sel, c->rate);

	return c->rate;
}

/* Peripheral clock operations */
struct clk_ops peri_clk_ops = {
	.enable = peri_clk_enable,
	.set_rate = peri_clk_set_rate,
	.get_rate = peri_clk_get_rate,
};

/* Enable a CCU clock */
static int ccu_clk_enable(struct clk *c, int enable)
{
	struct ccu_clock *ccu_clk = to_ccu_clk(c);
	void *base = (void *)c->ccu_clk_mgr_base;
	int ret = 0;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);
	if (!enable)
		return -EINVAL;	/* CCU clocks cannot be disabled */

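	/*
	 * Overview of the sequence below: unlock register write access,
	 * enable configuration of the CCU policy engine, program the same
	 * frequency ID into all four policies, open every clock in the
	 * policy masks, then kick the policy engine and wait for it to
	 * finish loading the new settings.
	 */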
	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	/* config enable for policy engine */
	writel(1, base + ccu_clk->lvm_en_offset);

	/* wait for the config-enable bit to be cleared by the hardware */
	ret = wait_bit(base, ccu_clk->lvm_en_offset, 0, 0);
	if (ret)
		return ret;

	/* freq ID */
	if (!ccu_clk->freq_bit_shift)
		ccu_clk->freq_bit_shift = 8;

	/* Set frequency id for each of the 4 policies */
	reg = ccu_clk->freq_id |
	    (ccu_clk->freq_id << (ccu_clk->freq_bit_shift)) |
	    (ccu_clk->freq_id << (ccu_clk->freq_bit_shift * 2)) |
	    (ccu_clk->freq_id << (ccu_clk->freq_bit_shift * 3));
	writel(reg, base + ccu_clk->policy_freq_offset);

	/* enable all clocks in the policy masks */
	writel(0x7fffffff, base + ccu_clk->policy0_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy1_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy2_mask_offset);
	writel(0x7fffffff, base + ccu_clk->policy3_mask_offset);

	if (ccu_clk->num_policy_masks == 2) {
		writel(0x7fffffff, base + ccu_clk->policy0_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy1_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy2_mask2_offset);
		writel(0x7fffffff, base + ccu_clk->policy3_mask2_offset);
	}

	/* start policy engine */
	reg = readl(base + ccu_clk->policy_ctl_offset);
	reg |= (POLICY_CTL_GO | POLICY_CTL_GO_ATL);
	writel(reg, base + ccu_clk->policy_ctl_offset);

	/* wait for the GO bit to clear (policy load complete) */
	ret = wait_bit(base, ccu_clk->policy_ctl_offset, 0, 0);
	if (ret)
		return ret;

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Get the CCU clock rate */
static unsigned long ccu_clk_get_rate(struct clk *c)
{
	struct ccu_clock *ccu_clk = to_ccu_clk(c);

	debug("%s: %s\n", __func__, c->name);
	c->rate = ccu_clk->freq_tbl[ccu_clk->freq_id];
	return c->rate;
}
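
/*
 * Note: freq_tbl[] maps the CCU frequency ID to a rate in Hz. The table
 * contents come from the SoC-specific clock data, not this file; for
 * example, a hypothetical table of { 26000000, 52000000, 104000000 }
 * with freq_id = 2 would report 104 MHz here.
 */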

/* CCU clock operations */
struct clk_ops ccu_clk_ops = {
	.enable = ccu_clk_enable,
	.get_rate = ccu_clk_get_rate,
};

/* Enable a bus clock */
static int bus_clk_enable(struct clk *c, int enable)
{
	struct bus_clock *bus_clk = to_bus_clk(c);
	struct bus_clk_data *cd = bus_clk->data;
	void *base = (void *)c->ccu_clk_mgr_base;
	int ret = 0;
	u32 reg;

	debug("%s: %s\n", __func__, c->name);

	/* enable access */
	writel(CLK_WR_ACCESS_PASSWORD, base + WR_ACCESS_OFFSET);

	/* gate control: take software control and set/clear the enable bit */
	reg = readl(base + cd->gate.offset);
	if (!!(reg & (1 << cd->gate.status_bit)) == !!enable) {
		debug("%s already %s\n", c->name,
		      enable ? "enabled" : "disabled");
	} else {
		int want = (enable) ? 1 : 0;

		reg |= (1 << cd->gate.hw_sw_sel_bit);
		if (enable)
			reg |= (1 << cd->gate.en_bit);
		else
			reg &= ~(1 << cd->gate.en_bit);

		writel(reg, base + cd->gate.offset);
		ret = wait_bit(base, cd->gate.offset, cd->gate.status_bit,
			       want);
		if (ret)
			return ret;
	}

	/* disable access */
	writel(0, base + WR_ACCESS_OFFSET);

	return ret;
}

/* Get the rate of a bus clock */
static unsigned long bus_clk_get_rate(struct clk *c)
{
	struct bus_clock *bus_clk = to_bus_clk(c);
	struct ccu_clock *ccu_clk;

	debug("%s: %s\n", __func__, c->name);
	ccu_clk = to_ccu_clk(c->parent);

	c->rate = bus_clk->freq_tbl[ccu_clk->freq_id];
	c->div = ccu_clk->freq_tbl[ccu_clk->freq_id] / c->rate;
	return c->rate;
}

/* Bus clock operations */
struct clk_ops bus_clk_ops = {
	.enable = bus_clk_enable,
	.get_rate = bus_clk_get_rate,
};

/* Enable a reference clock */
static int ref_clk_enable(struct clk *c, int enable)
{
	debug("%s: %s\n", __func__, c->name);
	return 0;
}

/* Reference clock operations */
struct clk_ops ref_clk_ops = {
	.enable = ref_clk_enable,
};

/*
 * clk.h implementation follows
 */

/* Initialize the clock framework */
int clk_init(void)
{
	debug("%s:\n", __func__);
	return 0;
}

/* Get a clock handle, given a name string */
struct clk *clk_get(const char *con_id)
{
	int i;
	struct clk_lookup *clk_tblp;

	debug("%s: %s\n", __func__, con_id);

	clk_tblp = arch_clk_tbl;
	for (i = 0; i < arch_clk_tbl_array_size; i++, clk_tblp++) {
		if (clk_tblp->con_id) {
			if (!con_id || strcmp(clk_tblp->con_id, con_id))
				continue;
			return clk_tblp->clk;
		}
	}
	return NULL;
}
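
/*
 * The lookup table used above (arch_clk_tbl, with arch_clk_tbl_array_size
 * entries) is supplied by the SoC-specific clock code rather than this
 * file. Each entry pairs a connection-id string with a struct clk
 * pointer, conceptually something like the following (the names are
 * illustrative only):
 *
 *	struct clk_lookup arch_clk_tbl[] = {
 *		{ .con_id = "uartb_clk", .clk = &uartb_clk.clk },
 *		...
 *	};
 */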

/* Enable a clock */
int clk_enable(struct clk *c)
{
	int ret = 0;

	debug("%s: %s\n", __func__, c->name);
	if (!c->ops || !c->ops->enable)
		return -1;

	/* enable parent clock first */
	if (c->parent)
		ret = clk_enable(c->parent);

	if (ret)
		return ret;

	if (!c->use_cnt)
		ret = c->ops->enable(c, 1);
	c->use_cnt++;

	return ret;
}
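
/*
 * Note on reference counting: clk_enable()/clk_disable() keep a per-clock
 * use_cnt, so the underlying ops->enable() hook is only called on the
 * 0 -> 1 transition and the hardware is only gated off again when the
 * count drops back to 0. Parents are enabled before their children, and
 * clk_disable() walks up and drops the parent's count as well.
 */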

/* Disable a clock */
void clk_disable(struct clk *c)
{
	debug("%s: %s\n", __func__, c->name);
	if (!c->ops || !c->ops->enable)
		return;

	if (c->use_cnt > 0) {
		c->use_cnt--;
		if (c->use_cnt == 0)
			c->ops->enable(c, 0);
	}

	/* disable parent */
	if (c->parent)
		clk_disable(c->parent);
}

/* Get the clock rate */
unsigned long clk_get_rate(struct clk *c)
{
	unsigned long rate;

	if (!c || !c->ops || !c->ops->get_rate)
		return 0;

	debug("%s: %s\n", __func__, c->name);
	rate = c->ops->get_rate(c);
	debug("%s: rate = %ld\n", __func__, rate);
	return rate;
}

/* Set the clock rate */
int clk_set_rate(struct clk *c, unsigned long rate)
{
	int ret;

	if (!c || !c->ops || !c->ops->set_rate)
		return -EINVAL;

	debug("%s: %s rate=%ld\n", __func__, c->name, rate);

	/* the rate can only be changed while the clock is unused */
	if (c->use_cnt)
		return -EINVAL;

	ret = c->ops->set_rate(c, rate);

	return ret;
}
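
/*
 * Because clk_set_rate() refuses to touch a clock whose use count is
 * non-zero, callers are expected to set the rate before enabling the
 * clock. A minimal sketch (the clock name and rate are made up):
 *
 *	struct clk *c = clk_get("sdio1_clk");
 *
 *	if (c && !clk_set_rate(c, 52000000))
 *		clk_enable(c);
 */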

/* Not required for this arch */
/*
long clk_round_rate(struct clk *clk, unsigned long rate);
int clk_set_parent(struct clk *clk, struct clk *parent);
struct clk *clk_get_parent(struct clk *clk);
*/