xref: /openbmc/linux/drivers/clk/bcm/clk-kona-setup.c (revision 52e6676e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Broadcom Corporation
4  * Copyright 2013 Linaro Limited
5  */
6 
7 #include <linux/io.h>
8 #include <linux/of_address.h>
9 
10 #include "clk-kona.h"
11 
12 /* These are used when a selector or trigger is found to be unneeded */
13 #define selector_clear_exists(sel)	((sel)->width = 0)
14 #define trigger_clear_exists(trig)	FLAG_CLEAR(trig, TRIG, EXISTS)
15 
16 /* Validity checking */
17 
18 static bool ccu_data_offsets_valid(struct ccu_data *ccu)
19 {
20 	struct ccu_policy *ccu_policy = &ccu->policy;
21 	u32 limit;
22 
23 	limit = ccu->range - sizeof(u32);
24 	limit = round_down(limit, sizeof(u32));
25 	if (ccu_policy_exists(ccu_policy)) {
26 		if (ccu_policy->enable.offset > limit) {
27 			pr_err("%s: bad policy enable offset for %s "
28 					"(%u > %u)\n", __func__,
29 				ccu->name, ccu_policy->enable.offset, limit);
30 			return false;
31 		}
32 		if (ccu_policy->control.offset > limit) {
33 			pr_err("%s: bad policy control offset for %s "
34 					"(%u > %u)\n", __func__,
35 				ccu->name, ccu_policy->control.offset, limit);
36 			return false;
37 		}
38 	}
39 
40 	return true;
41 }
42 
43 static bool clk_requires_trigger(struct kona_clk *bcm_clk)
44 {
45 	struct peri_clk_data *peri = bcm_clk->u.peri;
46 	struct bcm_clk_sel *sel;
47 	struct bcm_clk_div *div;
48 
49 	if (bcm_clk->type != bcm_clk_peri)
50 		return false;
51 
52 	sel = &peri->sel;
53 	if (sel->parent_count && selector_exists(sel))
54 		return true;
55 
56 	div = &peri->div;
57 	if (!divider_exists(div))
58 		return false;
59 
60 	/* Fixed dividers don't need triggers */
61 	if (!divider_is_fixed(div))
62 		return true;
63 
64 	div = &peri->pre_div;
65 
66 	return divider_exists(div) && !divider_is_fixed(div);
67 }
68 
69 static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk)
70 {
71 	struct peri_clk_data *peri;
72 	struct bcm_clk_policy *policy;
73 	struct bcm_clk_gate *gate;
74 	struct bcm_clk_hyst *hyst;
75 	struct bcm_clk_div *div;
76 	struct bcm_clk_sel *sel;
77 	struct bcm_clk_trig *trig;
78 	const char *name;
79 	u32 range;
80 	u32 limit;
81 
82 	BUG_ON(bcm_clk->type != bcm_clk_peri);
83 	peri = bcm_clk->u.peri;
84 	name = bcm_clk->init_data.name;
85 	range = bcm_clk->ccu->range;
86 
87 	limit = range - sizeof(u32);
88 	limit = round_down(limit, sizeof(u32));
89 
90 	policy = &peri->policy;
91 	if (policy_exists(policy)) {
92 		if (policy->offset > limit) {
93 			pr_err("%s: bad policy offset for %s (%u > %u)\n",
94 				__func__, name, policy->offset, limit);
95 			return false;
96 		}
97 	}
98 
99 	gate = &peri->gate;
100 	hyst = &peri->hyst;
101 	if (gate_exists(gate)) {
102 		if (gate->offset > limit) {
103 			pr_err("%s: bad gate offset for %s (%u > %u)\n",
104 				__func__, name, gate->offset, limit);
105 			return false;
106 		}
107 
108 		if (hyst_exists(hyst)) {
109 			if (hyst->offset > limit) {
110 				pr_err("%s: bad hysteresis offset for %s "
111 					"(%u > %u)\n", __func__,
112 					name, hyst->offset, limit);
113 				return false;
114 			}
115 		}
116 	} else if (hyst_exists(hyst)) {
117 		pr_err("%s: hysteresis but no gate for %s\n", __func__, name);
118 		return false;
119 	}
120 
121 	div = &peri->div;
122 	if (divider_exists(div)) {
123 		if (div->u.s.offset > limit) {
124 			pr_err("%s: bad divider offset for %s (%u > %u)\n",
125 				__func__, name, div->u.s.offset, limit);
126 			return false;
127 		}
128 	}
129 
130 	div = &peri->pre_div;
131 	if (divider_exists(div)) {
132 		if (div->u.s.offset > limit) {
133 			pr_err("%s: bad pre-divider offset for %s "
134 					"(%u > %u)\n",
135 				__func__, name, div->u.s.offset, limit);
136 			return false;
137 		}
138 	}
139 
140 	sel = &peri->sel;
141 	if (selector_exists(sel)) {
142 		if (sel->offset > limit) {
143 			pr_err("%s: bad selector offset for %s (%u > %u)\n",
144 				__func__, name, sel->offset, limit);
145 			return false;
146 		}
147 	}
148 
149 	trig = &peri->trig;
150 	if (trigger_exists(trig)) {
151 		if (trig->offset > limit) {
152 			pr_err("%s: bad trigger offset for %s (%u > %u)\n",
153 				__func__, name, trig->offset, limit);
154 			return false;
155 		}
156 	}
157 
158 	trig = &peri->pre_trig;
159 	if (trigger_exists(trig)) {
160 		if (trig->offset > limit) {
161 			pr_err("%s: bad pre-trigger offset for %s (%u > %u)\n",
162 				__func__, name, trig->offset, limit);
163 			return false;
164 		}
165 	}
166 
167 	return true;
168 }
169 
170 /* A bit position must be less than the number of bits in a 32-bit register. */
171 static bool bit_posn_valid(u32 bit_posn, const char *field_name,
172 			const char *clock_name)
173 {
174 	u32 limit = BITS_PER_BYTE * sizeof(u32) - 1;
175 
176 	if (bit_posn > limit) {
177 		pr_err("%s: bad %s bit for %s (%u > %u)\n", __func__,
178 			field_name, clock_name, bit_posn, limit);
179 		return false;
180 	}
181 	return true;
182 }
183 
184 /*
185  * A bitfield must be at least 1 bit wide.  Both the low-order and
186  * high-order bits must lie within a 32-bit register.  We require
187  * fields to be less than 32 bits wide, mainly because we use
188  * shifting to produce field masks, and shifting a full word width
189  * is not well-defined by the C standard.
190  */
191 static bool bitfield_valid(u32 shift, u32 width, const char *field_name,
192 			const char *clock_name)
193 {
194 	u32 limit = BITS_PER_BYTE * sizeof(u32);
195 
196 	if (!width) {
197 		pr_err("%s: bad %s field width 0 for %s\n", __func__,
198 			field_name, clock_name);
199 		return false;
200 	}
201 	if (shift + width > limit) {
202 		pr_err("%s: bad %s for %s (%u + %u > %u)\n", __func__,
203 			field_name, clock_name, shift, width, limit);
204 		return false;
205 	}
206 	return true;
207 }
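/*
 * A minimal sketch (hypothetical helper, not part of this file) of why
 * the checks above matter: field masks are built by shifting, and that
 * is only well defined while the field stays inside the 32-bit word.
 */
static inline u32 example_field_mask(u32 shift, u32 width)
{
	/* Assumes bitfield_valid() passed and width < 32, as the comment
	 * above requires; shifting by a full 32 bits would be undefined. */
	return ((1U << width) - 1) << shift;
}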
208 
209 static bool
210 ccu_policy_valid(struct ccu_policy *ccu_policy, const char *ccu_name)
211 {
212 	struct bcm_lvm_en *enable = &ccu_policy->enable;
213 	struct bcm_policy_ctl *control;
214 
215 	if (!bit_posn_valid(enable->bit, "policy enable", ccu_name))
216 		return false;
217 
218 	control = &ccu_policy->control;
219 	if (!bit_posn_valid(control->go_bit, "policy control GO", ccu_name))
220 		return false;
221 
222 	if (!bit_posn_valid(control->atl_bit, "policy control ATL", ccu_name))
223 		return false;
224 
225 	if (!bit_posn_valid(control->ac_bit, "policy control AC", ccu_name))
226 		return false;
227 
228 	return true;
229 }
230 
231 static bool policy_valid(struct bcm_clk_policy *policy, const char *clock_name)
232 {
233 	if (!bit_posn_valid(policy->bit, "policy", clock_name))
234 		return false;
235 
236 	return true;
237 }
238 
239 /*
240  * All gates, if defined, have a status bit, and for hardware-only
241  * gates, that's it.  Gates that can be software controlled also
242  * have an enable bit.  And a gate that can be hardware or software
243  * controlled will have a hardware/software select bit.
244  */
245 static bool gate_valid(struct bcm_clk_gate *gate, const char *field_name,
246 			const char *clock_name)
247 {
248 	if (!bit_posn_valid(gate->status_bit, "gate status", clock_name))
249 		return false;
250 
251 	if (gate_is_sw_controllable(gate)) {
252 		if (!bit_posn_valid(gate->en_bit, "gate enable", clock_name))
253 			return false;
254 
255 		if (gate_is_hw_controllable(gate)) {
256 			if (!bit_posn_valid(gate->hw_sw_sel_bit,
257 						"gate hw/sw select",
258 						clock_name))
259 				return false;
260 		}
261 	} else {
262 		BUG_ON(!gate_is_hw_controllable(gate));
263 	}
264 
265 	return true;
266 }
267 
268 static bool hyst_valid(struct bcm_clk_hyst *hyst, const char *clock_name)
269 {
270 	if (!bit_posn_valid(hyst->en_bit, "hysteresis enable", clock_name))
271 		return false;
272 
273 	if (!bit_posn_valid(hyst->val_bit, "hysteresis value", clock_name))
274 		return false;
275 
276 	return true;
277 }
278 
279 /*
280  * A selector bitfield must be valid.  Its parent_sel array must
281  * also be reasonable for the field.
282  */
283 static bool sel_valid(struct bcm_clk_sel *sel, const char *field_name,
284 			const char *clock_name)
285 {
286 	if (!bitfield_valid(sel->shift, sel->width, field_name, clock_name))
287 		return false;
288 
289 	if (sel->parent_count) {
290 		u32 max_sel;
291 		u32 limit;
292 
293 		/*
294 		 * Make sure the selector field can hold all the
295 		 * selector values we expect to be able to use.  A
296 		 * clock only needs to have a selector defined if it
297 		 * has more than one parent.  And in that case the
298 		 * highest selector value will be in the last entry
299 		 * in the array.
300 		 */
301 		max_sel = sel->parent_sel[sel->parent_count - 1];
302 		limit = (1 << sel->width) - 1;
303 		if (max_sel > limit) {
304 			pr_err("%s: bad selector for %s "
305 					"(%u needs > %u bits)\n",
306 				__func__, clock_name, max_sel,
307 				sel->width);
308 			return false;
309 		}
310 	} else {
311 		pr_warn("%s: ignoring selector for %s (no parents)\n",
312 			__func__, clock_name);
313 		selector_clear_exists(sel);
314 		kfree(sel->parent_sel);
315 		sel->parent_sel = NULL;
316 	}
317 
318 	return true;
319 }
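/*
 * A worked example (hypothetical values, not from this driver) of the
 * check above: with parent_sel[] = { 0, 2, 5 } the highest selector
 * value is 5, so a selector field of width 2 can only encode values up
 * to (1 << 2) - 1 = 3 and sel_valid() rejects it; a width of 3 or more
 * is sufficient.
 */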
320 
321 /*
322  * A fixed divider just needs to be non-zero.  A variable divider
323  * has to have a valid divider bitfield, and if it has a fraction,
324  * the width of the fraction must be no more than the width of
325  * the divider as a whole.
326  */
327 static bool div_valid(struct bcm_clk_div *div, const char *field_name,
328 			const char *clock_name)
329 {
330 	if (divider_is_fixed(div)) {
331 		/* Any fixed divider value but 0 is OK */
332 		if (div->u.fixed == 0) {
333 			pr_err("%s: bad %s fixed value 0 for %s\n", __func__,
334 				field_name, clock_name);
335 			return false;
336 		}
337 		return true;
338 	}
339 	if (!bitfield_valid(div->u.s.shift, div->u.s.width,
340 				field_name, clock_name))
341 		return false;
342 
343 	if (divider_has_fraction(div))
344 		if (div->u.s.frac_width > div->u.s.width) {
345 			pr_warn("%s: bad %s fraction width for %s (%u > %u)\n",
346 				__func__, field_name, clock_name,
347 				div->u.s.frac_width, div->u.s.width);
348 			return false;
349 		}
350 
351 	return true;
352 }
353 
354 /*
355  * If a clock has two dividers, the combined number of fractional
356  * bits must be representable in a 32-bit unsigned value.  This
357  * is because we scale up a dividend using both dividers before
358  * dividing to improve accuracy, and we need to avoid overflow.
359  */
360 static bool kona_dividers_valid(struct kona_clk *bcm_clk)
361 {
362 	struct peri_clk_data *peri = bcm_clk->u.peri;
363 	struct bcm_clk_div *div;
364 	struct bcm_clk_div *pre_div;
365 	u32 limit;
366 
367 	BUG_ON(bcm_clk->type != bcm_clk_peri);
368 
369 	if (!divider_exists(&peri->div) || !divider_exists(&peri->pre_div))
370 		return true;
371 
372 	div = &peri->div;
373 	pre_div = &peri->pre_div;
374 	if (divider_is_fixed(div) || divider_is_fixed(pre_div))
375 		return true;
376 
377 	limit = BITS_PER_BYTE * sizeof(u32);
378 
379 	return div->u.s.frac_width + pre_div->u.s.frac_width <= limit;
380 }
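/*
 * A worked example (hypothetical widths) of the limit above: with
 * div->u.s.frac_width = 15 and pre_div->u.s.frac_width = 16 the scaled
 * dividend needs 15 + 16 = 31 extra bits, which fits the 32-bit budget;
 * 17 + 16 = 33 would not, and kona_dividers_valid() returns false.
 */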
381 
382 
383 /* A trigger just needs to represent a valid bit position */
384 static bool trig_valid(struct bcm_clk_trig *trig, const char *field_name,
385 			const char *clock_name)
386 {
387 	return bit_posn_valid(trig->bit, field_name, clock_name);
388 }
389 
390 /* Determine whether the set of peripheral clock registers is valid. */
391 static bool
392 peri_clk_data_valid(struct kona_clk *bcm_clk)
393 {
394 	struct peri_clk_data *peri;
395 	struct bcm_clk_policy *policy;
396 	struct bcm_clk_gate *gate;
397 	struct bcm_clk_hyst *hyst;
398 	struct bcm_clk_sel *sel;
399 	struct bcm_clk_div *div;
400 	struct bcm_clk_div *pre_div;
401 	struct bcm_clk_trig *trig;
402 	const char *name;
403 
404 	BUG_ON(bcm_clk->type != bcm_clk_peri);
405 
406 	/*
407 	 * First validate register offsets.  This is the only place
408 	 * where we need something from the ccu, so we do these
409 	 * together.
410 	 */
411 	if (!peri_clk_data_offsets_valid(bcm_clk))
412 		return false;
413 
414 	peri = bcm_clk->u.peri;
415 	name = bcm_clk->init_data.name;
416 
417 	policy = &peri->policy;
418 	if (policy_exists(policy) && !policy_valid(policy, name))
419 		return false;
420 
421 	gate = &peri->gate;
422 	if (gate_exists(gate) && !gate_valid(gate, "gate", name))
423 		return false;
424 
425 	hyst = &peri->hyst;
426 	if (hyst_exists(hyst) && !hyst_valid(hyst, name))
427 		return false;
428 
429 	sel = &peri->sel;
430 	if (selector_exists(sel)) {
431 		if (!sel_valid(sel, "selector", name))
432 			return false;
433 
434 	} else if (sel->parent_count > 1) {
435 		pr_err("%s: multiple parents but no selector for %s\n",
436 			__func__, name);
437 
438 		return false;
439 	}
440 
441 	div = &peri->div;
442 	pre_div = &peri->pre_div;
443 	if (divider_exists(div)) {
444 		if (!div_valid(div, "divider", name))
445 			return false;
446 
447 		if (divider_exists(pre_div))
448 			if (!div_valid(pre_div, "pre-divider", name))
449 				return false;
450 	} else if (divider_exists(pre_div)) {
451 		pr_err("%s: pre-divider but no divider for %s\n", __func__,
452 			name);
453 		return false;
454 	}
455 
456 	trig = &peri->trig;
457 	if (trigger_exists(trig)) {
458 		if (!trig_valid(trig, "trigger", name))
459 			return false;
460 
461 		if (trigger_exists(&peri->pre_trig)) {
462 			if (!trig_valid(&peri->pre_trig, "pre-trigger", name)) {
463 				return false;
464 			}
465 		}
466 		if (!clk_requires_trigger(bcm_clk)) {
467 			pr_warn("%s: ignoring trigger for %s (not needed)\n",
468 				__func__, name);
469 			trigger_clear_exists(trig);
470 		}
471 	} else if (trigger_exists(&peri->pre_trig)) {
472 		pr_err("%s: pre-trigger but no trigger for %s\n", __func__,
473 			name);
474 		return false;
475 	} else if (clk_requires_trigger(bcm_clk)) {
476 		pr_err("%s: required trigger missing for %s\n", __func__,
477 			name);
478 		return false;
479 	}
480 
481 	return kona_dividers_valid(bcm_clk);
482 }
483 
484 static bool kona_clk_valid(struct kona_clk *bcm_clk)
485 {
486 	switch (bcm_clk->type) {
487 	case bcm_clk_peri:
488 		if (!peri_clk_data_valid(bcm_clk))
489 			return false;
490 		break;
491 	default:
492 		pr_err("%s: unrecognized clock type (%d)\n", __func__,
493 			(int)bcm_clk->type);
494 		return false;
495 	}
496 	return true;
497 }
498 
499 /*
500  * Scan an array of parent clock names to determine whether there
501  * are any entries containing BAD_CLK_NAME.  Such entries are
502  * placeholders for non-supported clocks.  Keep track of the
503  * position of each clock name in the original array.
504  *
505  * Allocates an array of pointers to hold the names of all
506  * supported entries in the original array, and returns a pointer to
507  * that array in *names.  This will be used for registering the
508  * clock with the common clock code.  On successful return,
509  * *count indicates how many entries are in that names array.
510  *
511  * If there is at least one entry in the resulting names array,
512  * another array is allocated to record the parent selector value
513  * for each (defined) parent clock.  This is the value that
514  * represents this parent clock in the clock's source selector
515  * register.  The position of the clock in the original parent array
516  * defines that selector value.  The number of entries in this array
517  * is the same as the number of entries in the parent names array.
518  *
519  * The array of selector values is returned.  If the clock has no
520  * parents, no selector is required and a null pointer is returned.
521  *
522  * Returns a null pointer if the clock names array supplied was
523  * null.  (This is not an error.)
524  *
525  * Returns a pointer-coded error if an error occurs.
526  */
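/*
 * A worked example (hypothetical clock names): given
 * clocks[] = { "ref_clk", BAD_CLK_NAME, "var_clk", NULL }, the function
 * sets *names to { "ref_clk", "var_clk" }, *count to 2, and returns the
 * selector array { 0, 2 }, each entry being that parent's position in
 * the original clocks[] array.
 */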
527 static u32 *parent_process(const char *clocks[],
528 			u32 *count, const char ***names)
529 {
530 	static const char **parent_names;
531 	static u32 *parent_sel;
532 	const char **clock;
533 	u32 parent_count;
534 	u32 bad_count = 0;
535 	u32 orig_count;
536 	u32 i;
537 	u32 j;
538 
539 	*count = 0;	/* In case of early return */
540 	*names = NULL;
541 	if (!clocks)
542 		return NULL;
543 
544 	/*
545 	 * Count the number of names in the null-terminated array,
546 	 * and find out how many of those are actually clock names.
547 	 */
548 	for (clock = clocks; *clock; clock++)
549 		if (*clock == BAD_CLK_NAME)
550 			bad_count++;
551 	orig_count = (u32)(clock - clocks);
552 	parent_count = orig_count - bad_count;
553 
554 	/* If all clocks are unsupported, we treat it as no clock */
555 	if (!parent_count)
556 		return NULL;
557 
558 	/* Avoid exceeding our parent clock limit */
559 	if (parent_count > PARENT_COUNT_MAX) {
560 		pr_err("%s: too many parents (%u > %u)\n", __func__,
561 			parent_count, PARENT_COUNT_MAX);
562 		return ERR_PTR(-EINVAL);
563 	}
564 
565 	/*
566 	 * There is one parent name for each defined parent clock.
567 	 * We also maintain an array containing the selector value
568 	 * for each defined clock.  If there's only one clock, the
569 	 * selector is not required, but we allocate space for the
570 	 * array anyway to keep things simple.
571 	 */
572 	parent_names = kmalloc_array(parent_count, sizeof(*parent_names),
573 			       GFP_KERNEL);
574 	if (!parent_names)
575 		return ERR_PTR(-ENOMEM);
576 
577 	/* There is at least one parent, so allocate a selector array */
578 	parent_sel = kmalloc_array(parent_count, sizeof(*parent_sel),
579 				   GFP_KERNEL);
580 	if (!parent_sel) {
581 		kfree(parent_names);
582 
583 		return ERR_PTR(-ENOMEM);
584 	}
585 
586 	/* Now fill in the parent names and selector arrays */
587 	for (i = 0, j = 0; i < orig_count; i++) {
588 		if (clocks[i] != BAD_CLK_NAME) {
589 			parent_names[j] = clocks[i];
590 			parent_sel[j] = i;
591 			j++;
592 		}
593 	}
594 	*names = parent_names;
595 	*count = parent_count;
596 
597 	return parent_sel;
598 }
599 
600 static int
601 clk_sel_setup(const char **clocks, struct bcm_clk_sel *sel,
602 		struct clk_init_data *init_data)
603 {
604 	const char **parent_names = NULL;
605 	u32 parent_count = 0;
606 	u32 *parent_sel;
607 
608 	/*
609 	 * If a peripheral clock has multiple parents, the value
610 	 * used by the hardware to select that parent is represented
611 	 * by the parent clock's position in the "clocks" list.  Some
612 	 * values don't have defined or supported clocks; these will
613 	 * have BAD_CLK_NAME entries in the parents[] array.  The
614 	 * list is terminated by a NULL entry.
615 	 *
616 	 * We need to supply (only) the names of defined parent
617 	 * clocks when registering a clock though, so we use an
618 	 * array of parent selector values to map between the
619 	 * indexes the common clock code uses and the selector
620 	 * values we need.
621 	 */
622 	parent_sel = parent_process(clocks, &parent_count, &parent_names);
623 	if (IS_ERR(parent_sel)) {
624 		int ret = PTR_ERR(parent_sel);
625 
626 		pr_err("%s: error processing parent clocks for %s (%d)\n",
627 			__func__, init_data->name, ret);
628 
629 		return ret;
630 	}
631 
632 	init_data->parent_names = parent_names;
633 	init_data->num_parents = parent_count;
634 
635 	sel->parent_count = parent_count;
636 	sel->parent_sel = parent_sel;
637 
638 	return 0;
639 }
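/*
 * Continuing the hypothetical example above: after clk_sel_setup() the
 * common clock framework sees parents { "ref_clk", "var_clk" }, and a
 * framework parent index of 1 corresponds to the hardware selector
 * value sel->parent_sel[1] == 2 when the selector field is programmed.
 */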
640 
641 static void clk_sel_teardown(struct bcm_clk_sel *sel,
642 		struct clk_init_data *init_data)
643 {
644 	kfree(sel->parent_sel);
645 	sel->parent_sel = NULL;
646 	sel->parent_count = 0;
647 
648 	init_data->num_parents = 0;
649 	kfree(init_data->parent_names);
650 	init_data->parent_names = NULL;
651 }
652 
653 static void peri_clk_teardown(struct peri_clk_data *data,
654 				struct clk_init_data *init_data)
655 {
656 	clk_sel_teardown(&data->sel, init_data);
657 }
658 
659 /*
660  * Caller is responsible for freeing the parent_names[] and
661  * parent_sel[] arrays in the peripheral clock's "data" structure;
662  * these are assigned if the clock has one or more parent clocks
663  * associated with it.
664  */
665 static int
666 peri_clk_setup(struct peri_clk_data *data, struct clk_init_data *init_data)
667 {
668 	init_data->flags = CLK_IGNORE_UNUSED;
669 
670 	return clk_sel_setup(data->clocks, &data->sel, init_data);
671 }
672 
673 static void bcm_clk_teardown(struct kona_clk *bcm_clk)
674 {
675 	switch (bcm_clk->type) {
676 	case bcm_clk_peri:
677 		peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data);
678 		break;
679 	default:
680 		break;
681 	}
682 	bcm_clk->u.data = NULL;
683 	bcm_clk->type = bcm_clk_none;
684 }
685 
686 static void kona_clk_teardown(struct clk_hw *hw)
687 {
688 	struct kona_clk *bcm_clk;
689 
690 	if (!hw)
691 		return;
692 
693 	clk_hw_unregister(hw);
694 
695 	bcm_clk = to_kona_clk(hw);
696 	bcm_clk_teardown(bcm_clk);
697 }
698 
699 static int kona_clk_setup(struct kona_clk *bcm_clk)
700 {
701 	int ret;
702 	struct clk_init_data *init_data = &bcm_clk->init_data;
703 
704 	switch (bcm_clk->type) {
705 	case bcm_clk_peri:
706 		ret = peri_clk_setup(bcm_clk->u.data, init_data);
707 		if (ret)
708 			return ret;
709 		break;
710 	default:
711 		pr_err("%s: clock type %d invalid for %s\n", __func__,
712 			(int)bcm_clk->type, init_data->name);
713 		return -EINVAL;
714 	}
715 
716 	/* Make sure everything makes sense before we set it up */
717 	if (!kona_clk_valid(bcm_clk)) {
718 		pr_err("%s: clock data invalid for %s\n", __func__,
719 			init_data->name);
720 		ret = -EINVAL;
721 		goto out_teardown;
722 	}
723 
724 	bcm_clk->hw.init = init_data;
725 	ret = clk_hw_register(NULL, &bcm_clk->hw);
726 	if (ret) {
727 		pr_err("%s: error registering clock %s (%d)\n", __func__,
728 			init_data->name, ret);
729 		goto out_teardown;
730 	}
731 
732 	return 0;
733 out_teardown:
734 	bcm_clk_teardown(bcm_clk);
735 
736 	return ret;
737 }
738 
739 static void ccu_clks_teardown(struct ccu_data *ccu)
740 {
741 	u32 i;
742 
743 	for (i = 0; i < ccu->clk_num; i++)
744 		kona_clk_teardown(&ccu->kona_clks[i].hw);
745 }
746 
747 static void kona_ccu_teardown(struct ccu_data *ccu)
748 {
749 	if (!ccu->base)
750 		return;
751 
752 	of_clk_del_provider(ccu->node);	/* safe if never added */
753 	ccu_clks_teardown(ccu);
754 	of_node_put(ccu->node);
755 	ccu->node = NULL;
756 	iounmap(ccu->base);
757 	ccu->base = NULL;
758 }
759 
760 static bool ccu_data_valid(struct ccu_data *ccu)
761 {
762 	struct ccu_policy *ccu_policy;
763 
764 	if (!ccu_data_offsets_valid(ccu))
765 		return false;
766 
767 	ccu_policy = &ccu->policy;
768 	if (ccu_policy_exists(ccu_policy))
769 		if (!ccu_policy_valid(ccu_policy, ccu->name))
770 			return false;
771 
772 	return true;
773 }
774 
775 static struct clk_hw *
776 of_clk_kona_onecell_get(struct of_phandle_args *clkspec, void *data)
777 {
778 	struct ccu_data *ccu = data;
779 	unsigned int idx = clkspec->args[0];
780 
781 	if (idx >= ccu->clk_num) {
782 		pr_err("%s: invalid index %u\n", __func__, idx);
783 		return ERR_PTR(-EINVAL);
784 	}
785 
786 	return &ccu->kona_clks[idx].hw;
787 }
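/*
 * A usage sketch (hypothetical device tree nodes): a consumer with
 * "clocks = <&root_ccu 3>;" reaches this callback with
 * clkspec->args[0] == 3 and is handed &ccu->kona_clks[3].hw, provided
 * the index is below ccu->clk_num.
 */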
788 
789 /*
790  * Set up a CCU.  Initialize each defined clock in the CCU's clock
791  * array, then register the CCU as a provider for those clocks.
792  */
793 void __init kona_dt_ccu_setup(struct ccu_data *ccu,
794 			struct device_node *node)
795 {
796 	struct resource res = { 0 };
797 	resource_size_t range;
798 	unsigned int i;
799 	int ret;
800 
801 	ret = of_address_to_resource(node, 0, &res);
802 	if (ret) {
803 		pr_err("%s: no valid CCU registers found for %pOFn\n", __func__,
804 			node);
805 		goto out_err;
806 	}
807 
808 	range = resource_size(&res);
809 	if (range > (resource_size_t)U32_MAX) {
810 		pr_err("%s: address range too large for %pOFn\n", __func__,
811 			node);
812 		goto out_err;
813 	}
814 
815 	ccu->range = (u32)range;
816 
817 	if (!ccu_data_valid(ccu)) {
818 		pr_err("%s: ccu data not valid for %pOFn\n", __func__, node);
819 		goto out_err;
820 	}
821 
822 	ccu->base = ioremap(res.start, ccu->range);
823 	if (!ccu->base) {
824 		pr_err("%s: unable to map CCU registers for %pOFn\n", __func__,
825 			node);
826 		goto out_err;
827 	}
828 	ccu->node = of_node_get(node);
829 
830 	/*
831 	 * Set up and register each defined kona clock in the CCU's
832 	 * kona_clks[] array.  Then register the CCU as a provider
833 	 * for these clocks.
834 	 */
835 	for (i = 0; i < ccu->clk_num; i++) {
836 		if (!ccu->kona_clks[i].ccu)
837 			continue;
838 		kona_clk_setup(&ccu->kona_clks[i]);
839 	}
840 
841 	ret = of_clk_add_hw_provider(node, of_clk_kona_onecell_get, ccu);
842 	if (ret) {
843 		pr_err("%s: error adding ccu %pOFn as provider (%d)\n", __func__,
844 				node, ret);
845 		goto out_err;
846 	}
847 
848 	if (!kona_ccu_init(ccu))
849 		pr_err("Broadcom %pOFn initialization had errors\n", node);
850 
851 	return;
852 out_err:
853 	kona_ccu_teardown(ccu);
854 	pr_err("Broadcom %pOFn setup aborted\n", node);
855 }
856