xref: /openbmc/linux/arch/mips/alchemy/common/clock.c (revision dbd815c0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Alchemy clocks.
4  *
5  * Exposes all configurable internal clock sources to the clk framework.
6  *
7  * We have:
8  *  - Root source, usually 12MHz supplied by an external crystal
9  *  - 3 PLLs which generate multiples of root rate [AUX, CPU, AUX2]
10  *
11  * Dividers:
12  *  - 6 clock dividers with:
13  *   * selectable source [one of the PLLs],
14  *   * output divided by [2 .. 512 in steps of 2] (!Au1300)
15  *     or [1 .. 256 in steps of 1] (Au1300),
16  *   * can be enabled individually.
17  *
18  * - up to 6 "internal" (fixed) consumers which:
19  *   * take either AUXPLL or one of the above 6 dividers as input,
20  *   * divide this input by 1, 2, or 4 (and 3 on Au1300).
21  *   * can be disabled separately.
22  *
23  * Misc clocks:
24  * - sysbus clock: CPU core clock (CPUPLL) divided by 2, 3 or 4;
25  *    depends on board design, should be set by the bootloader, read-only.
26  * - peripheral clock: half the rate of the sysbus clock, source for a lot
27  *    of peripheral blocks, read-only.
28  * - memory clock: clock rate to the main memory chips, depends on board
29  *    design, read-only.
30  * - lrclk: the static bus clock signal for synchronous operation;
31  *    depends on board design, must be set by the bootloader, and may
32  *    need to be known to correctly configure devices attached to
33  *    the static bus. The Au1000/1500/1100 manuals call it LCLK; on
34  *    later models it's called RCLK.
35  */
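/* Illustrative example (the multiplier and divider values below are
 * board-dependent and assumed here only to show how the tree composes):
 *   root crystal                        12 MHz
 *   CPUPLL, multiplier 33               12 MHz * 33 = 396 MHz
 *   sysbus clock, CPUPLL / 4            396 MHz / 4 =  99 MHz
 *   peripheral clock, sysbus / 2         99 MHz / 2 =  49.5 MHz
 * The frequency dividers (fed from CPUPLL or AUXPLL) and the internal
 * source muxes then divide further down to whatever rate a peripheral
 * block needs.
 */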
36 
37 #include <linux/init.h>
38 #include <linux/io.h>
39 #include <linux/clk.h>
40 #include <linux/clk-provider.h>
41 #include <linux/clkdev.h>
42 #include <linux/slab.h>
43 #include <linux/spinlock.h>
44 #include <linux/types.h>
45 #include <asm/mach-au1x00/au1000.h>
46 
47 /* Base clock: 12MHz is the default in all databooks, and I haven't
48  * found any board yet which uses a different rate.
49  */
50 #define ALCHEMY_ROOTCLK_RATE	12000000
51 
52 /*
53  * the internal sources which can be driven by the PLLs and dividers.
54  * Names taken from the databooks, refer to them for more information,
55  * especially which ones share a clock line.
56  */
57 static const char * const alchemy_au1300_intclknames[] = {
58 	"lcd_intclk", "gpemgp_clk", "maempe_clk", "maebsa_clk",
59 	"EXTCLK0", "EXTCLK1"
60 };
61 
62 static const char * const alchemy_au1200_intclknames[] = {
63 	"lcd_intclk", NULL, NULL, NULL, "EXTCLK0", "EXTCLK1"
64 };
65 
66 static const char * const alchemy_au1550_intclknames[] = {
67 	"usb_clk", "psc0_intclk", "psc1_intclk", "pci_clko",
68 	"EXTCLK0", "EXTCLK1"
69 };
70 
71 static const char * const alchemy_au1100_intclknames[] = {
72 	"usb_clk", "lcd_intclk", NULL, "i2s_clk", "EXTCLK0", "EXTCLK1"
73 };
74 
75 static const char * const alchemy_au1500_intclknames[] = {
76 	NULL, "usbd_clk", "usbh_clk", "pci_clko", "EXTCLK0", "EXTCLK1"
77 };
78 
79 static const char * const alchemy_au1000_intclknames[] = {
80 	"irda_clk", "usbd_clk", "usbh_clk", "i2s_clk", "EXTCLK0",
81 	"EXTCLK1"
82 };
83 
84 /* aliases for a few on-chip sources which are either shared
85  * or have gone through name changes.
86  */
87 static struct clk_aliastable {
88 	char *alias;
89 	char *base;
90 	int cputype;
91 } alchemy_clk_aliases[] __initdata = {
92 	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
93 	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
94 	{ "irda_clk", "usb_clk",    ALCHEMY_CPU_AU1100 },
95 	{ "usbh_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
96 	{ "usbd_clk", "usb_clk",    ALCHEMY_CPU_AU1550 },
97 	{ "psc2_intclk", "usb_clk", ALCHEMY_CPU_AU1550 },
98 	{ "psc3_intclk", "EXTCLK0", ALCHEMY_CPU_AU1550 },
99 	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1200 },
100 	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1200 },
101 	{ "psc0_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
102 	{ "psc2_intclk", "EXTCLK0", ALCHEMY_CPU_AU1300 },
103 	{ "psc1_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
104 	{ "psc3_intclk", "EXTCLK1", ALCHEMY_CPU_AU1300 },
105 
106 	{ NULL, NULL, 0 },
107 };
108 
109 #define IOMEM(x)	((void __iomem *)(KSEG1ADDR(CPHYSADDR(x))))
110 
111 /* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
112 static spinlock_t alchemy_clk_fg0_lock;
113 static spinlock_t alchemy_clk_fg1_lock;
114 static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);
115 
116 /* CPU Core clock *****************************************************/
117 
118 static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
119 					    unsigned long parent_rate)
120 {
121 	unsigned long t;
122 
123 	/*
124 	 * On early Au1000, sys_cpupll was write-only. Since these
125 	 * silicon versions of Au1000 are not sold, we don't bend
126 	 * over backwards trying to determine the frequency.
127 	 */
128 	if (unlikely(au1xxx_cpu_has_pll_wo()))
129 		t = 396000000;
130 	else {
131 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
132 		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
133 			t &= 0x3f;
134 		t *= parent_rate;
135 	}
136 
137 	return t;
138 }
139 
140 void __init alchemy_set_lpj(void)
141 {
142 	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
143 	preset_lpj /= 2 * HZ;
144 }
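/* For illustration only (HZ is a kernel configuration value): with the
 * CPU clock at 396 MHz and HZ=100, the function above presets
 *   preset_lpj = 396000000 / (2 * 100) = 1980000
 * so the generic loops-per-jiffy calibration can be skipped at boot.
 */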
145 
146 static const struct clk_ops alchemy_clkops_cpu = {
147 	.recalc_rate	= alchemy_clk_cpu_recalc,
148 };
149 
150 static struct clk __init *alchemy_clk_setup_cpu(const char *parent_name,
151 						int ctype)
152 {
153 	struct clk_init_data id;
154 	struct clk_hw *h;
155 	struct clk *clk;
156 
157 	h = kzalloc(sizeof(*h), GFP_KERNEL);
158 	if (!h)
159 		return ERR_PTR(-ENOMEM);
160 
161 	id.name = ALCHEMY_CPU_CLK;
162 	id.parent_names = &parent_name;
163 	id.num_parents = 1;
164 	id.flags = 0;
165 	id.ops = &alchemy_clkops_cpu;
166 	h->init = &id;
167 
168 	clk = clk_register(NULL, h);
169 	if (IS_ERR(clk)) {
170 		pr_err("failed to register clock\n");
171 		kfree(h);
172 	}
173 
174 	return clk;
175 }
176 
177 /* AUXPLLs ************************************************************/
178 
179 struct alchemy_auxpll_clk {
180 	struct clk_hw hw;
181 	unsigned long reg;	/* Au1300 also has AUXPLL2 */
182 	int maxmult;		/* max multiplier */
183 };
184 #define to_auxpll_clk(x) container_of(x, struct alchemy_auxpll_clk, hw)
185 
186 static unsigned long alchemy_clk_aux_recalc(struct clk_hw *hw,
187 					    unsigned long parent_rate)
188 {
189 	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
190 
191 	return (alchemy_rdsys(a->reg) & 0xff) * parent_rate;
192 }
193 
194 static int alchemy_clk_aux_setr(struct clk_hw *hw,
195 				unsigned long rate,
196 				unsigned long parent_rate)
197 {
198 	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
199 	unsigned long d = rate;
200 
201 	if (rate)
202 		d /= parent_rate;
203 	else
204 		d = 0;
205 
206 	/* minimum is 84MHz, max is 756-1032 depending on variant */
207 	if (((d < 7) && (d != 0)) || (d > a->maxmult))
208 		return -EINVAL;
209 
210 	alchemy_wrsys(d, a->reg);
211 	return 0;
212 }
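/* Worked example (12 MHz parent assumed): a request for 96 MHz gives
 * d = 96000000 / 12000000 = 8, which satisfies 7 <= d <= maxmult, so 8 is
 * written to the AUXPLL register and alchemy_clk_aux_recalc() later reads
 * it back as 8 * 12 MHz = 96 MHz.
 */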
213 
214 static long alchemy_clk_aux_roundr(struct clk_hw *hw,
215 					    unsigned long rate,
216 					    unsigned long *parent_rate)
217 {
218 	struct alchemy_auxpll_clk *a = to_auxpll_clk(hw);
219 	unsigned long mult;
220 
221 	if (!rate || !*parent_rate)
222 		return 0;
223 
224 	mult = rate / (*parent_rate);
225 
226 	if (mult && (mult < 7))
227 		mult = 7;
228 	if (mult > a->maxmult)
229 		mult = a->maxmult;
230 
231 	return (*parent_rate) * mult;
232 }
233 
234 static const struct clk_ops alchemy_clkops_aux = {
235 	.recalc_rate	= alchemy_clk_aux_recalc,
236 	.set_rate	= alchemy_clk_aux_setr,
237 	.round_rate	= alchemy_clk_aux_roundr,
238 };
239 
240 static struct clk __init *alchemy_clk_setup_aux(const char *parent_name,
241 						char *name, int maxmult,
242 						unsigned long reg)
243 {
244 	struct clk_init_data id;
245 	struct clk *c;
246 	struct alchemy_auxpll_clk *a;
247 
248 	a = kzalloc(sizeof(*a), GFP_KERNEL);
249 	if (!a)
250 		return ERR_PTR(-ENOMEM);
251 
252 	id.name = name;
253 	id.parent_names = &parent_name;
254 	id.num_parents = 1;
255 	id.flags = CLK_GET_RATE_NOCACHE;
256 	id.ops = &alchemy_clkops_aux;
257 
258 	a->reg = reg;
259 	a->maxmult = maxmult;
260 	a->hw.init = &id;
261 
262 	c = clk_register(NULL, &a->hw);
263 	if (!IS_ERR(c))
264 		clk_register_clkdev(c, name, NULL);
265 	else
266 		kfree(a);
267 
268 	return c;
269 }
270 
271 /* sysbus_clk *********************************************************/
272 
273 static struct clk __init  *alchemy_clk_setup_sysbus(const char *pn)
274 {
275 	unsigned long v = (alchemy_rdsys(AU1000_SYS_POWERCTRL) & 3) + 2;
276 	struct clk *c;
277 
278 	c = clk_register_fixed_factor(NULL, ALCHEMY_SYSBUS_CLK,
279 				      pn, 0, 1, v);
280 	if (!IS_ERR(c))
281 		clk_register_clkdev(c, ALCHEMY_SYSBUS_CLK, NULL);
282 	return c;
283 }
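/* Example (register value assumed): SYS_POWERCTRL[1:0] = 2 selects a fixed
 * divide-by-4, so a 396 MHz core clock yields a 99 MHz sysbus clock.
 */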
284 
285 /* Peripheral Clock ***************************************************/
286 
287 static struct clk __init *alchemy_clk_setup_periph(const char *pn)
288 {
289 	/* Peripheral clock runs at half the rate of sysbus clk */
290 	struct clk *c;
291 
292 	c = clk_register_fixed_factor(NULL, ALCHEMY_PERIPH_CLK,
293 				      pn, 0, 1, 2);
294 	if (!IS_ERR(c))
295 		clk_register_clkdev(c, ALCHEMY_PERIPH_CLK, NULL);
296 	return c;
297 }
298 
299 /* mem clock **********************************************************/
300 
301 static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
302 {
303 	void __iomem *addr = IOMEM(AU1000_MEM_PHYS_ADDR);
304 	unsigned long v;
305 	struct clk *c;
306 	int div;
307 
308 	switch (ct) {
309 	case ALCHEMY_CPU_AU1550:
310 	case ALCHEMY_CPU_AU1200:
311 		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
312 		div = (v & (1 << 15)) ? 1 : 2;
313 		break;
314 	case ALCHEMY_CPU_AU1300:
315 		v = __raw_readl(addr + AU1550_MEM_SDCONFIGB);
316 		div = (v & (1 << 31)) ? 1 : 2;
317 		break;
318 	case ALCHEMY_CPU_AU1000:
319 	case ALCHEMY_CPU_AU1500:
320 	case ALCHEMY_CPU_AU1100:
321 	default:
322 		div = 2;
323 		break;
324 	}
325 
326 	c = clk_register_fixed_factor(NULL, ALCHEMY_MEM_CLK, pn,
327 				      0, 1, div);
328 	if (!IS_ERR(c))
329 		clk_register_clkdev(c, ALCHEMY_MEM_CLK, NULL);
330 	return c;
331 }
332 
333 /* lrclk: external synchronous static bus clock ***********************/
334 
335 static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
336 {
337 	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
338 	 * otherwise lrclk=pclk/4.
339 	 * All other variants: MEM_STCFG0[15:13] = divisor.
340 	 * L/RCLK = periph_clk / (divisor + 1)
341 	 * On Au1000, Au1500, Au1100 it's called LCLK,
342 	 * on later models it's called RCLK, but it's the same thing.
343 	 */
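	/* Worked example (values assumed): on an Au1550 with a 49.5 MHz
	 * peripheral clock and MEM_STCFG0[15:13] = 3, the divisor is 3 + 1 = 4
	 * and L/RCLK = 49.5 MHz / 4 = 12.375 MHz.
	 */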
344 	struct clk *c;
345 	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
346 
347 	switch (t) {
348 	case ALCHEMY_CPU_AU1000:
349 	case ALCHEMY_CPU_AU1500:
350 		v = 4 + ((v >> 11) & 1);
351 		break;
352 	default:	/* all other models */
353 		v = ((v >> 13) & 7) + 1;
354 	}
355 	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
356 				      pn, 0, 1, v);
357 	if (!IS_ERR(c))
358 		clk_register_clkdev(c, ALCHEMY_LR_CLK, NULL);
359 	return c;
360 }
361 
362 /* Clock dividers and muxes *******************************************/
363 
364 /* data for fgen and csrc mux-dividers */
365 struct alchemy_fgcs_clk {
366 	struct clk_hw hw;
367 	spinlock_t *reglock;	/* register lock		  */
368 	unsigned long reg;	/* SYS_FREQCTRL0/1		  */
369 	int shift;		/* offset in register		  */
370 	int parent;		/* parent before disable [Au1300] */
371 	int isen;		/* is it enabled?		  */
372 	int *dt;		/* dividertable for csrc	  */
373 };
374 #define to_fgcs_clk(x) container_of(x, struct alchemy_fgcs_clk, hw)
375 
376 static long alchemy_calc_div(unsigned long rate, unsigned long prate,
377 			       int scale, int maxdiv, unsigned long *rv)
378 {
379 	long div1, div2;
380 
381 	div1 = prate / rate;
382 	if ((prate / div1) > rate)
383 		div1++;
384 
385 	if (scale == 2) {	/* only div-by-multiple-of-2 possible */
386 		if (div1 & 1)
387 			div1++;	/* stay <=prate */
388 	}
389 
390 	div2 = (div1 / scale) - 1;	/* value to write to register */
391 
392 	if (div2 > maxdiv)
393 		div2 = maxdiv;
394 	if (rv)
395 		*rv = div2;
396 
397 	div1 = ((div2 + 1) * scale);
398 	return div1;
399 }
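/* Worked example for the helper above (numbers assumed): rate = 48 MHz,
 * prate = 396 MHz, scale = 2, maxdiv = 512.  div1 starts at 8, is bumped to
 * 9 because 396 MHz / 8 = 49.5 MHz overshoots the request, then to 10 to
 * stay a multiple of 2.  The register value (div2) becomes 10 / 2 - 1 = 4,
 * and the returned effective divider is 10, i.e. an output of 39.6 MHz.
 */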
400 
401 static int alchemy_clk_fgcs_detr(struct clk_hw *hw,
402 				 struct clk_rate_request *req,
403 				 int scale, int maxdiv)
404 {
405 	struct clk_hw *pc, *bpc, *free;
406 	long tdv, tpr, pr, nr, br, bpr, diff, lastdiff;
407 	int j;
408 
409 	lastdiff = INT_MAX;
410 	bpr = 0;
411 	bpc = NULL;
412 	br = -EINVAL;
413 	free = NULL;
414 
415 	/* look at the rates each enabled parent supplies and select
416 	 * the one that gets closest to but not over the requested rate.
417 	 */
418 	for (j = 0; j < 7; j++) {
419 		pc = clk_hw_get_parent_by_index(hw, j);
420 		if (!pc)
421 			break;
422 
423 		/* if this parent is currently unused, remember it.
424 		 * XXX: we would actually want clk_has_active_children()
425 		 * but this is a good-enough approximation for now.
426 		 */
427 		if (!clk_hw_is_prepared(pc)) {
428 			if (!free)
429 				free = pc;
430 		}
431 
432 		pr = clk_hw_get_rate(pc);
433 		if (pr < req->rate)
434 			continue;
435 
436 		/* what can hardware actually provide */
437 		tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv, NULL);
438 		nr = pr / tdv;
439 		diff = req->rate - nr;
440 		if (nr > req->rate)
441 			continue;
442 
443 		if (diff < lastdiff) {
444 			lastdiff = diff;
445 			bpr = pr;
446 			bpc = pc;
447 			br = nr;
448 		}
449 		if (diff == 0)
450 			break;
451 	}
452 
453 	/* if we couldn't get the exact rate we wanted from the enabled
454 	 * parents, maybe we can tell an available disabled/inactive one
455 	 * to give us a rate we can divide down to the requested rate.
456 	 */
457 	if (lastdiff && free) {
458 		for (j = (maxdiv == 4) ? 1 : scale; j <= maxdiv; j += scale) {
459 			tpr = req->rate * j;
460 			if (tpr < 0)
461 				break;
462 			pr = clk_hw_round_rate(free, tpr);
463 
464 			tdv = alchemy_calc_div(req->rate, pr, scale, maxdiv,
465 					       NULL);
466 			nr = pr / tdv;
467 			diff = req->rate - nr;
468 			if (nr > req->rate)
469 				continue;
470 			if (diff < lastdiff) {
471 				lastdiff = diff;
472 				bpr = pr;
473 				bpc = free;
474 				br = nr;
475 			}
476 			if (diff == 0)
477 				break;
478 		}
479 	}
480 
481 	if (br < 0)
482 		return br;
483 
484 	req->best_parent_rate = bpr;
485 	req->best_parent_hw = bpc;
486 	req->rate = br;
487 
488 	return 0;
489 }
490 
491 static int alchemy_clk_fgv1_en(struct clk_hw *hw)
492 {
493 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
494 	unsigned long v, flags;
495 
496 	spin_lock_irqsave(c->reglock, flags);
497 	v = alchemy_rdsys(c->reg);
498 	v |= (1 << 1) << c->shift;
499 	alchemy_wrsys(v, c->reg);
500 	spin_unlock_irqrestore(c->reglock, flags);
501 
502 	return 0;
503 }
504 
505 static int alchemy_clk_fgv1_isen(struct clk_hw *hw)
506 {
507 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
508 	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 1);
509 
510 	return v & 1;
511 }
512 
513 static void alchemy_clk_fgv1_dis(struct clk_hw *hw)
514 {
515 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
516 	unsigned long v, flags;
517 
518 	spin_lock_irqsave(c->reglock, flags);
519 	v = alchemy_rdsys(c->reg);
520 	v &= ~((1 << 1) << c->shift);
521 	alchemy_wrsys(v, c->reg);
522 	spin_unlock_irqrestore(c->reglock, flags);
523 }
524 
525 static int alchemy_clk_fgv1_setp(struct clk_hw *hw, u8 index)
526 {
527 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
528 	unsigned long v, flags;
529 
530 	spin_lock_irqsave(c->reglock, flags);
531 	v = alchemy_rdsys(c->reg);
532 	if (index)
533 		v |= (1 << c->shift);
534 	else
535 		v &= ~(1 << c->shift);
536 	alchemy_wrsys(v, c->reg);
537 	spin_unlock_irqrestore(c->reglock, flags);
538 
539 	return 0;
540 }
541 
542 static u8 alchemy_clk_fgv1_getp(struct clk_hw *hw)
543 {
544 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
545 
546 	return (alchemy_rdsys(c->reg) >> c->shift) & 1;
547 }
548 
549 static int alchemy_clk_fgv1_setr(struct clk_hw *hw, unsigned long rate,
550 				 unsigned long parent_rate)
551 {
552 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
553 	unsigned long div, v, flags, ret;
554 	int sh = c->shift + 2;
555 
556 	if (!rate || !parent_rate || rate > (parent_rate / 2))
557 		return -EINVAL;
558 	ret = alchemy_calc_div(rate, parent_rate, 2, 512, &div);
559 	spin_lock_irqsave(c->reglock, flags);
560 	v = alchemy_rdsys(c->reg);
561 	v &= ~(0xff << sh);
562 	v |= div << sh;
563 	alchemy_wrsys(v, c->reg);
564 	spin_unlock_irqrestore(c->reglock, flags);
565 
566 	return 0;
567 }
568 
569 static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
570 					     unsigned long parent_rate)
571 {
572 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
573 	unsigned long v = alchemy_rdsys(c->reg) >> (c->shift + 2);
574 
575 	v = ((v & 0xff) + 1) * 2;
576 	return parent_rate / v;
577 }
578 
579 static int alchemy_clk_fgv1_detr(struct clk_hw *hw,
580 				 struct clk_rate_request *req)
581 {
582 	return alchemy_clk_fgcs_detr(hw, req, 2, 512);
583 }
584 
585 /* Au1000, Au1100, Au15x0, Au12x0 */
586 static const struct clk_ops alchemy_clkops_fgenv1 = {
587 	.recalc_rate	= alchemy_clk_fgv1_recalc,
588 	.determine_rate	= alchemy_clk_fgv1_detr,
589 	.set_rate	= alchemy_clk_fgv1_setr,
590 	.set_parent	= alchemy_clk_fgv1_setp,
591 	.get_parent	= alchemy_clk_fgv1_getp,
592 	.enable		= alchemy_clk_fgv1_en,
593 	.disable	= alchemy_clk_fgv1_dis,
594 	.is_enabled	= alchemy_clk_fgv1_isen,
595 };
596 
597 static void __alchemy_clk_fgv2_en(struct alchemy_fgcs_clk *c)
598 {
599 	unsigned long v = alchemy_rdsys(c->reg);
600 
601 	v &= ~(3 << c->shift);
602 	v |= (c->parent & 3) << c->shift;
603 	alchemy_wrsys(v, c->reg);
604 	c->isen = 1;
605 }
606 
607 static int alchemy_clk_fgv2_en(struct clk_hw *hw)
608 {
609 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
610 	unsigned long flags;
611 
612 	/* enable by setting the previous parent clock */
613 	spin_lock_irqsave(c->reglock, flags);
614 	__alchemy_clk_fgv2_en(c);
615 	spin_unlock_irqrestore(c->reglock, flags);
616 
617 	return 0;
618 }
619 
620 static int alchemy_clk_fgv2_isen(struct clk_hw *hw)
621 {
622 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
623 
624 	return ((alchemy_rdsys(c->reg) >> c->shift) & 3) != 0;
625 }
626 
627 static void alchemy_clk_fgv2_dis(struct clk_hw *hw)
628 {
629 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
630 	unsigned long v, flags;
631 
632 	spin_lock_irqsave(c->reglock, flags);
633 	v = alchemy_rdsys(c->reg);
634 	v &= ~(3 << c->shift);	/* set input mux to "disabled" state */
635 	alchemy_wrsys(v, c->reg);
636 	c->isen = 0;
637 	spin_unlock_irqrestore(c->reglock, flags);
638 }
639 
640 static int alchemy_clk_fgv2_setp(struct clk_hw *hw, u8 index)
641 {
642 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
643 	unsigned long flags;
644 
645 	spin_lock_irqsave(c->reglock, flags);
646 	c->parent = index + 1;	/* value to write to register */
647 	if (c->isen)
648 		__alchemy_clk_fgv2_en(c);
649 	spin_unlock_irqrestore(c->reglock, flags);
650 
651 	return 0;
652 }
653 
654 static u8 alchemy_clk_fgv2_getp(struct clk_hw *hw)
655 {
656 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
657 	unsigned long flags, v;
658 
659 	spin_lock_irqsave(c->reglock, flags);
660 	v = c->parent - 1;
661 	spin_unlock_irqrestore(c->reglock, flags);
662 	return v;
663 }
664 
665 /* fg0-2 and fg4-6 share a "scale"-bit. With this bit cleared, the
666  * dividers behave exactly as on previous models (dividers are multiples
667  * of 2); with the bit set, dividers are multiples of 1, halving their
668  * range, but making them also much more flexible.
669  */
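/* In divider terms (derived from the code below): with the scale bit clear,
 * a register value N selects a divider of 2 * (N + 1), i.e. 2..512; with
 * the bit set it selects N + 1, i.e. 1..256.
 */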
670 static int alchemy_clk_fgv2_setr(struct clk_hw *hw, unsigned long rate,
671 				 unsigned long parent_rate)
672 {
673 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
674 	int sh = c->shift + 2;
675 	unsigned long div, v, flags, ret;
676 
677 	if (!rate || !parent_rate || rate > parent_rate)
678 		return -EINVAL;
679 
680 	v = alchemy_rdsys(c->reg) & (1 << 30); /* test "scale" bit */
681 	ret = alchemy_calc_div(rate, parent_rate, v ? 1 : 2,
682 			       v ? 256 : 512, &div);
683 
684 	spin_lock_irqsave(c->reglock, flags);
685 	v = alchemy_rdsys(c->reg);
686 	v &= ~(0xff << sh);
687 	v |= (div & 0xff) << sh;
688 	alchemy_wrsys(v, c->reg);
689 	spin_unlock_irqrestore(c->reglock, flags);
690 
691 	return 0;
692 }
693 
694 static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
695 					     unsigned long parent_rate)
696 {
697 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
698 	int sh = c->shift + 2;
699 	unsigned long v, t;
700 
701 	v = alchemy_rdsys(c->reg);
702 	t = parent_rate / (((v >> sh) & 0xff) + 1);
703 	if ((v & (1 << 30)) == 0)		/* test scale bit */
704 		t /= 2;
705 
706 	return t;
707 }
708 
709 static int alchemy_clk_fgv2_detr(struct clk_hw *hw,
710 				 struct clk_rate_request *req)
711 {
712 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
713 	int scale, maxdiv;
714 
715 	if (alchemy_rdsys(c->reg) & (1 << 30)) {
716 		scale = 1;
717 		maxdiv = 256;
718 	} else {
719 		scale = 2;
720 		maxdiv = 512;
721 	}
722 
723 	return alchemy_clk_fgcs_detr(hw, req, scale, maxdiv);
724 }
725 
726 /* Au1300 larger input mux, no separate disable bit, flexible divider */
727 static const struct clk_ops alchemy_clkops_fgenv2 = {
728 	.recalc_rate	= alchemy_clk_fgv2_recalc,
729 	.determine_rate	= alchemy_clk_fgv2_detr,
730 	.set_rate	= alchemy_clk_fgv2_setr,
731 	.set_parent	= alchemy_clk_fgv2_setp,
732 	.get_parent	= alchemy_clk_fgv2_getp,
733 	.enable		= alchemy_clk_fgv2_en,
734 	.disable	= alchemy_clk_fgv2_dis,
735 	.is_enabled	= alchemy_clk_fgv2_isen,
736 };
737 
738 static const char * const alchemy_clk_fgv1_parents[] = {
739 	ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
740 };
741 
742 static const char * const alchemy_clk_fgv2_parents[] = {
743 	ALCHEMY_AUXPLL2_CLK, ALCHEMY_CPU_CLK, ALCHEMY_AUXPLL_CLK
744 };
745 
746 static const char * const alchemy_clk_fgen_names[] = {
747 	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
748 	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK };
749 
750 static int __init alchemy_clk_init_fgens(int ctype)
751 {
752 	struct clk *c;
753 	struct clk_init_data id;
754 	struct alchemy_fgcs_clk *a;
755 	unsigned long v;
756 	int i, ret;
757 
758 	switch (ctype) {
759 	case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200:
760 		id.ops = &alchemy_clkops_fgenv1;
761 		id.parent_names = alchemy_clk_fgv1_parents;
762 		id.num_parents = 2;
763 		break;
764 	case ALCHEMY_CPU_AU1300:
765 		id.ops = &alchemy_clkops_fgenv2;
766 		id.parent_names = alchemy_clk_fgv2_parents;
767 		id.num_parents = 3;
768 		break;
769 	default:
770 		return -ENODEV;
771 	}
772 	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
773 
774 	a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL);
775 	if (!a)
776 		return -ENOMEM;
777 
778 	spin_lock_init(&alchemy_clk_fg0_lock);
779 	spin_lock_init(&alchemy_clk_fg1_lock);
780 	ret = 0;
781 	for (i = 0; i < 6; i++) {
782 		id.name = alchemy_clk_fgen_names[i];
783 		a->shift = 10 * (i < 3 ? i : i - 3);
784 		if (i > 2) {
785 			a->reg = AU1000_SYS_FREQCTRL1;
786 			a->reglock = &alchemy_clk_fg1_lock;
787 		} else {
788 			a->reg = AU1000_SYS_FREQCTRL0;
789 			a->reglock = &alchemy_clk_fg0_lock;
790 		}
791 
792 		/* default to first parent if bootloader has set
793 		 * the mux to disabled state.
794 		 */
795 		if (ctype == ALCHEMY_CPU_AU1300) {
796 			v = alchemy_rdsys(a->reg);
797 			a->parent = (v >> a->shift) & 3;
798 			if (!a->parent) {
799 				a->parent = 1;
800 				a->isen = 0;
801 			} else
802 				a->isen = 1;
803 		}
804 
805 		a->hw.init = &id;
806 		c = clk_register(NULL, &a->hw);
807 		if (IS_ERR(c))
808 			ret++;
809 		else
810 			clk_register_clkdev(c, id.name, NULL);
811 		a++;
812 	}
813 
814 	return ret;
815 }
816 
817 /* internal sources muxes *********************************************/
818 
819 static int alchemy_clk_csrc_isen(struct clk_hw *hw)
820 {
821 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
822 	unsigned long v = alchemy_rdsys(c->reg);
823 
824 	return (((v >> c->shift) >> 2) & 7) != 0;
825 }
826 
827 static void __alchemy_clk_csrc_en(struct alchemy_fgcs_clk *c)
828 {
829 	unsigned long v = alchemy_rdsys(c->reg);
830 
831 	v &= ~((7 << 2) << c->shift);
832 	v |= ((c->parent & 7) << 2) << c->shift;
833 	alchemy_wrsys(v, c->reg);
834 	c->isen = 1;
835 }
836 
837 static int alchemy_clk_csrc_en(struct clk_hw *hw)
838 {
839 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
840 	unsigned long flags;
841 
842 	/* enable by setting the previous parent clock */
843 	spin_lock_irqsave(c->reglock, flags);
844 	__alchemy_clk_csrc_en(c);
845 	spin_unlock_irqrestore(c->reglock, flags);
846 
847 	return 0;
848 }
849 
850 static void alchemy_clk_csrc_dis(struct clk_hw *hw)
851 {
852 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
853 	unsigned long v, flags;
854 
855 	spin_lock_irqsave(c->reglock, flags);
856 	v = alchemy_rdsys(c->reg);
857 	v &= ~((3 << 2) << c->shift);	/* mux to "disabled" state */
858 	alchemy_wrsys(v, c->reg);
859 	c->isen = 0;
860 	spin_unlock_irqrestore(c->reglock, flags);
861 }
862 
863 static int alchemy_clk_csrc_setp(struct clk_hw *hw, u8 index)
864 {
865 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
866 	unsigned long flags;
867 
868 	spin_lock_irqsave(c->reglock, flags);
869 	c->parent = index + 1;	/* value to write to register */
870 	if (c->isen)
871 		__alchemy_clk_csrc_en(c);
872 	spin_unlock_irqrestore(c->reglock, flags);
873 
874 	return 0;
875 }
876 
877 static u8 alchemy_clk_csrc_getp(struct clk_hw *hw)
878 {
879 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
880 
881 	return c->parent - 1;
882 }
883 
884 static unsigned long alchemy_clk_csrc_recalc(struct clk_hw *hw,
885 					     unsigned long parent_rate)
886 {
887 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
888 	unsigned long v = (alchemy_rdsys(c->reg) >> c->shift) & 3;
889 
890 	return parent_rate / c->dt[v];
891 }
892 
893 static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
894 				 unsigned long parent_rate)
895 {
896 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
897 	unsigned long d, v, flags;
898 	int i;
899 
900 	if (!rate || !parent_rate || rate > parent_rate)
901 		return -EINVAL;
902 
903 	d = (parent_rate + (rate / 2)) / rate;
904 	if (d > 4)
905 		return -EINVAL;
906 	if ((d == 3) && (c->dt[2] != 3))
907 		d = 4;
908 
909 	for (i = 0; i < 4; i++)
910 		if (c->dt[i] == d)
911 			break;
912 
913 	if (i >= 4)
914 		return -EINVAL;	/* oops */
915 
916 	spin_lock_irqsave(c->reglock, flags);
917 	v = alchemy_rdsys(c->reg);
918 	v &= ~(3 << c->shift);
919 	v |= (i & 3) << c->shift;
920 	alchemy_wrsys(v, c->reg);
921 	spin_unlock_irqrestore(c->reglock, flags);
922 
923 	return 0;
924 }
925 
926 static int alchemy_clk_csrc_detr(struct clk_hw *hw,
927 				 struct clk_rate_request *req)
928 {
929 	struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
930 	int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
931 
932 	return alchemy_clk_fgcs_detr(hw, req, scale, 4);
933 }
934 
935 static const struct clk_ops alchemy_clkops_csrc = {
936 	.recalc_rate	= alchemy_clk_csrc_recalc,
937 	.determine_rate	= alchemy_clk_csrc_detr,
938 	.set_rate	= alchemy_clk_csrc_setr,
939 	.set_parent	= alchemy_clk_csrc_setp,
940 	.get_parent	= alchemy_clk_csrc_getp,
941 	.enable		= alchemy_clk_csrc_en,
942 	.disable	= alchemy_clk_csrc_dis,
943 	.is_enabled	= alchemy_clk_csrc_isen,
944 };
945 
946 static const char * const alchemy_clk_csrc_parents[] = {
947 	/* disabled at index 0 */ ALCHEMY_AUXPLL_CLK,
948 	ALCHEMY_FG0_CLK, ALCHEMY_FG1_CLK, ALCHEMY_FG2_CLK,
949 	ALCHEMY_FG3_CLK, ALCHEMY_FG4_CLK, ALCHEMY_FG5_CLK
950 };
951 
952 /* divider tables */
953 static int alchemy_csrc_dt1[] = { 1, 4, 1, 2 };	/* rest */
954 static int alchemy_csrc_dt2[] = { 1, 4, 3, 2 };	/* Au1300 */
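/* The 2-bit divider field of each SYS_CLKSRC mux indexes these tables:
 * encoding 1 always means divide-by-4 and encoding 3 divide-by-2, while
 * encoding 2 means divide-by-3 on the Au1300 and duplicates divide-by-1
 * on the other models (see the databooks for the authoritative register
 * description).
 */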
955 
956 static int __init alchemy_clk_setup_imux(int ctype)
957 {
958 	struct alchemy_fgcs_clk *a;
959 	const char * const *names;
960 	struct clk_init_data id;
961 	unsigned long v;
962 	int i, ret, *dt;
963 	struct clk *c;
964 
965 	id.ops = &alchemy_clkops_csrc;
966 	id.parent_names = alchemy_clk_csrc_parents;
967 	id.num_parents = 7;
968 	id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE;
969 
970 	dt = alchemy_csrc_dt1;
971 	switch (ctype) {
972 	case ALCHEMY_CPU_AU1000:
973 		names = alchemy_au1000_intclknames;
974 		break;
975 	case ALCHEMY_CPU_AU1500:
976 		names = alchemy_au1500_intclknames;
977 		break;
978 	case ALCHEMY_CPU_AU1100:
979 		names = alchemy_au1100_intclknames;
980 		break;
981 	case ALCHEMY_CPU_AU1550:
982 		names = alchemy_au1550_intclknames;
983 		break;
984 	case ALCHEMY_CPU_AU1200:
985 		names = alchemy_au1200_intclknames;
986 		break;
987 	case ALCHEMY_CPU_AU1300:
988 		dt = alchemy_csrc_dt2;
989 		names = alchemy_au1300_intclknames;
990 		break;
991 	default:
992 		return -ENODEV;
993 	}
994 
995 	a = kcalloc(6, sizeof(*a), GFP_KERNEL);
996 	if (!a)
997 		return -ENOMEM;
998 
999 	ret = 0;
1000 
1001 	for (i = 0; i < 6; i++) {
1002 		id.name = names[i];
1003 		if (!id.name)
1004 			goto next;
1005 
1006 		a->shift = i * 5;
1007 		a->reg = AU1000_SYS_CLKSRC;
1008 		a->reglock = &alchemy_clk_csrc_lock;
1009 		a->dt = dt;
1010 
1011 		/* default to first parent clock if mux is initially
1012 		 * set to disabled state.
1013 		 */
1014 		v = alchemy_rdsys(a->reg);
1015 		a->parent = ((v >> a->shift) >> 2) & 7;
1016 		if (!a->parent) {
1017 			a->parent = 1;
1018 			a->isen = 0;
1019 		} else
1020 			a->isen = 1;
1021 
1022 		a->hw.init = &id;
1023 		c = clk_register(NULL, &a->hw);
1024 		if (IS_ERR(c))
1025 			ret++;
1026 		else
1027 			clk_register_clkdev(c, id.name, NULL);
1028 next:
1029 		a++;
1030 	}
1031 
1032 	return ret;
1033 }
1034 
1035 
1036 /**********************************************************************/
1037 
1038 
1039 #define ERRCK(x)						\
1040 	if (IS_ERR(x)) {					\
1041 		ret = PTR_ERR(x);				\
1042 		goto out;					\
1043 	}
1044 
1045 static int __init alchemy_clk_init(void)
1046 {
1047 	int ctype = alchemy_get_cputype(), ret, i;
1048 	struct clk_aliastable *t = alchemy_clk_aliases;
1049 	struct clk *c;
1050 
1051 	/* Root of the Alchemy clock tree: external 12MHz crystal osc */
1052 	c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
1053 					   0, ALCHEMY_ROOTCLK_RATE);
1054 	ERRCK(c)
1055 
1056 	/* CPU core clock */
1057 	c = alchemy_clk_setup_cpu(ALCHEMY_ROOT_CLK, ctype);
1058 	ERRCK(c)
1059 
1060 	/* AUXPLLs: max 1GHz on Au1300, 748MHz on older models */
1061 	i = (ctype == ALCHEMY_CPU_AU1300) ? 84 : 63;
1062 	c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK, ALCHEMY_AUXPLL_CLK,
1063 				  i, AU1000_SYS_AUXPLL);
1064 	ERRCK(c)
1065 
1066 	if (ctype == ALCHEMY_CPU_AU1300) {
1067 		c = alchemy_clk_setup_aux(ALCHEMY_ROOT_CLK,
1068 					  ALCHEMY_AUXPLL2_CLK, i,
1069 					  AU1300_SYS_AUXPLL2);
1070 		ERRCK(c)
1071 	}
1072 
1073 	/* sysbus clock: cpu core clock divided by 2, 3 or 4 */
1074 	c = alchemy_clk_setup_sysbus(ALCHEMY_CPU_CLK);
1075 	ERRCK(c)
1076 
1077 	/* peripheral clock: runs at half rate of sysbus clk */
1078 	c = alchemy_clk_setup_periph(ALCHEMY_SYSBUS_CLK);
1079 	ERRCK(c)
1080 
1081 	/* SDR/DDR memory clock */
1082 	c = alchemy_clk_setup_mem(ALCHEMY_SYSBUS_CLK, ctype);
1083 	ERRCK(c)
1084 
1085 	/* L/RCLK: external static bus clock for synchronous mode */
1086 	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
1087 	ERRCK(c)
1088 
1089 	/* Frequency dividers 0-5 */
1090 	ret = alchemy_clk_init_fgens(ctype);
1091 	if (ret) {
1092 		ret = -ENODEV;
1093 		goto out;
1094 	}
1095 
1096 	/* dividing muxes for internal sources */
1097 	ret = alchemy_clk_setup_imux(ctype);
1098 	if (ret) {
1099 		ret = -ENODEV;
1100 		goto out;
1101 	}
1102 
1103 	/* set up aliases drivers might look for */
1104 	while (t->base) {
1105 		if (t->cputype == ctype)
1106 			clk_add_alias(t->alias, NULL, t->base, NULL);
1107 		t++;
1108 	}
1109 
1110 	pr_info("Alchemy clocktree installed\n");
1111 	return 0;
1112 
1113 out:
1114 	return ret;
1115 }
1116 postcore_initcall(alchemy_clk_init);
1117
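/* Consumer-side sketch (not part of the original file): with the clkdev
 * lookups registered above, a driver could obtain and program one of the
 * internal source muxes roughly as follows; the clock name and rate are
 * placeholders and error handling is elided.
 *
 *	struct clk *clk = clk_get(NULL, "psc0_intclk");
 *
 *	if (!IS_ERR(clk)) {
 *		clk_set_rate(clk, 48000000);	// parent + divider chosen via
 *						// alchemy_clk_csrc_detr()
 *		clk_prepare_enable(clk);	// sets the source mux bits
 *		clk_disable_unprepare(clk);
 *		clk_put(clk);
 *	}
 */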