xref: /openbmc/linux/drivers/clk/bcm/clk-kona.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright (C) 2013 Broadcom Corporation
4   * Copyright 2013 Linaro Limited
5   */
6  
7  #include "clk-kona.h"
8  
9  #include <linux/delay.h>
10  #include <linux/io.h>
11  #include <linux/kernel.h>
12  #include <linux/clk-provider.h>
13  
14  /*
15   * "Policies" affect the frequencies of bus clocks provided by a
16   * CCU.  (I believe these policies are named "Deep Sleep", "Economy",
17   * "Normal", and "Turbo".)  A lower policy number has lower power
18   * consumption, and policy 2 is the default.
19   */
20  #define CCU_POLICY_COUNT	4
21  
22  #define CCU_ACCESS_PASSWORD      0xA5A500
23  #define CLK_GATE_DELAY_LOOP      2000
24  
25  /* Bitfield operations */
26  
27  /* Produces a mask of set bits covering a range of a 32-bit value */
28  static inline u32 bitfield_mask(u32 shift, u32 width)
29  {
30  	return ((1 << width) - 1) << shift;
31  }
32  
33  /* Extract the value of a bitfield found within a given register value */
34  static inline u32 bitfield_extract(u32 reg_val, u32 shift, u32 width)
35  {
36  	return (reg_val & bitfield_mask(shift, width)) >> shift;
37  }
38  
39  /* Replace the value of a bitfield found within a given register value */
40  static inline u32 bitfield_replace(u32 reg_val, u32 shift, u32 width, u32 val)
41  {
42  	u32 mask = bitfield_mask(shift, width);
43  
44  	return (reg_val & ~mask) | (val << shift);
45  }
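
/*
 * Worked example (illustrative only, not part of the original source):
 * with shift = 4 and width = 3,
 *
 *	bitfield_mask(4, 3)		== 0x00000070
 *	bitfield_extract(0xa5, 4, 3)	== 0x2	(bits [6:4] of 0xa5)
 *	bitfield_replace(0xa5, 4, 3, 5)	== 0xd5	(bits [6:4] become 101)
 *
 * Note that bitfield_replace() does not mask val to the field width;
 * the callers in this file pass values that already fit the field.
 */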
46  
47  /* Divider and scaling helpers */
48  
49  /* Convert a divider into the scaled divisor value it represents. */
50  static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div)
51  {
52  	return (u64)reg_div + ((u64)1 << div->u.s.frac_width);
53  }
54  
55  /*
56   * Build a scaled divider value as close as possible to the
57   * given whole part (div_value) and fractional part (expressed
58   * in billionths).
59   */
60  u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths)
61  {
62  	u64 combined;
63  
64  	BUG_ON(!div_value);
65  	BUG_ON(billionths >= BILLION);
66  
67  	combined = (u64)div_value * BILLION + billionths;
68  	combined <<= div->u.s.frac_width;
69  
70  	return DIV_ROUND_CLOSEST_ULL(combined, BILLION);
71  }
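
/*
 * Worked example (illustrative, assuming a divider with frac_width = 3):
 * to represent a divisor of 2.5, callers pass div_value = 2 and
 * billionths = 500000000.  Then:
 *
 *	combined = (2 * BILLION + 500000000) << 3 = 20000000000
 *	result	 = 20000000000 / BILLION	  = 20
 *
 * i.e. the scaled divisor 20 encodes 20 / (1 << 3) = 2.5.
 */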
72  
73  /* The scaled minimum divisor representable by a divider */
74  static inline u64
75  scaled_div_min(struct bcm_clk_div *div)
76  {
77  	if (divider_is_fixed(div))
78  		return (u64)div->u.fixed;
79  
80  	return scaled_div_value(div, 0);
81  }
82  
83  /* The scaled maximum divisor representable by a divider */
84  u64 scaled_div_max(struct bcm_clk_div *div)
85  {
86  	u32 reg_div;
87  
88  	if (divider_is_fixed(div))
89  		return (u64)div->u.fixed;
90  
91  	reg_div = ((u32)1 << div->u.s.width) - 1;
92  
93  	return scaled_div_value(div, reg_div);
94  }
95  
96  /*
97   * Convert a scaled divisor into its divider representation as
98   * stored in a divider register field.
99   */
100  static inline u32
101  divider(struct bcm_clk_div *div, u64 scaled_div)
102  {
103  	BUG_ON(scaled_div < scaled_div_min(div));
104  	BUG_ON(scaled_div > scaled_div_max(div));
105  
106  	return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width));
107  }
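
/*
 * Example round trip (illustrative, assuming frac_width = 3): a register
 * field value of 12 represents the scaled divisor
 * scaled_div_value(div, 12) = 12 + 8 = 20 (i.e. a divisor of 2.5), and
 * divider(div, 20) = 20 - 8 = 12 converts it back to the value stored
 * in the register field.
 */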
108  
109  /* Return a rate scaled for use when dividing by a scaled divisor. */
110  static inline u64
111  scale_rate(struct bcm_clk_div *div, u32 rate)
112  {
113  	if (divider_is_fixed(div))
114  		return (u64)rate;
115  
116  	return (u64)rate << div->u.s.frac_width;
117  }
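
/*
 * Putting the scaling pieces together (illustrative, frac_width = 3):
 * dividing a 100 MHz rate by the scaled divisor 20 (i.e. 2.5) is done as
 *
 *	scale_rate(div, 100000000) = 100000000 << 3 = 800000000
 *	800000000 / 20		   = 40000000	     (40 MHz)
 *
 * which is exactly 100 MHz / 2.5.  This is the computation performed by
 * clk_recalc_rate() and round_rate() below.
 */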
118  
119  /* CCU access */
120  
121  /* Read a 32-bit register value from a CCU's address space. */
122  static inline u32 __ccu_read(struct ccu_data *ccu, u32 reg_offset)
123  {
124  	return readl(ccu->base + reg_offset);
125  }
126  
127  /* Write a 32-bit register value into a CCU's address space. */
128  static inline void
129  __ccu_write(struct ccu_data *ccu, u32 reg_offset, u32 reg_val)
130  {
131  	writel(reg_val, ccu->base + reg_offset);
132  }
133  
134  static inline unsigned long ccu_lock(struct ccu_data *ccu)
135  {
136  	unsigned long flags;
137  
138  	spin_lock_irqsave(&ccu->lock, flags);
139  
140  	return flags;
141  }
142  static inline void ccu_unlock(struct ccu_data *ccu, unsigned long flags)
143  {
144  	spin_unlock_irqrestore(&ccu->lock, flags);
145  }
146  
147  /*
148   * Enable/disable write access to CCU protected registers.  The
149   * WR_ACCESS register for all CCUs is at offset 0.
150   */
151  static inline void __ccu_write_enable(struct ccu_data *ccu)
152  {
153  	if (ccu->write_enabled) {
154  		pr_err("%s: access already enabled for %s\n", __func__,
155  			ccu->name);
156  		return;
157  	}
158  	ccu->write_enabled = true;
159  	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD | 1);
160  }
161  
162  static inline void __ccu_write_disable(struct ccu_data *ccu)
163  {
164  	if (!ccu->write_enabled) {
165  		pr_err("%s: access wasn't enabled for %s\n", __func__,
166  			ccu->name);
167  		return;
168  	}
169  
170  	__ccu_write(ccu, 0, CCU_ACCESS_PASSWORD);
171  	ccu->write_enabled = false;
172  }
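
/*
 * Sketch of the access pattern used throughout this file (see
 * clk_gate(), divider_write() and selector_write() below): protected
 * register updates are bracketed by the CCU spinlock and the WR_ACCESS
 * password register:
 *
 *	flags = ccu_lock(ccu);
 *	__ccu_write_enable(ccu);
 *	... read/modify/write CCU registers, wait for triggers ...
 *	__ccu_write_disable(ccu);
 *	ccu_unlock(ccu, flags);
 */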
173  
174  /*
175   * Poll a register in a CCU's address space, returning when the
176   * specified bit in that register's value is set (or clear).  Delay
177   * a microsecond after each read of the register.  Returns true if
178   * successful, or false if we gave up trying.
179   *
180   * Caller must ensure the CCU lock is held.
181   */
182  static inline bool
183  __ccu_wait_bit(struct ccu_data *ccu, u32 reg_offset, u32 bit, bool want)
184  {
185  	unsigned int tries;
186  	u32 bit_mask = 1 << bit;
187  
188  	for (tries = 0; tries < CLK_GATE_DELAY_LOOP; tries++) {
189  		u32 val;
190  		bool bit_val;
191  
192  		val = __ccu_read(ccu, reg_offset);
193  		bit_val = (val & bit_mask) != 0;
194  		if (bit_val == want)
195  			return true;
196  		udelay(1);
197  	}
198  	pr_warn("%s: %s/0x%04x bit %u was never %s\n", __func__,
199  		ccu->name, reg_offset, bit, want ? "set" : "clear");
200  
201  	return false;
202  }
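
/*
 * With CLK_GATE_DELAY_LOOP set to 2000 and a 1 microsecond delay per
 * iteration, __ccu_wait_bit() gives up after roughly 2 ms (ignoring the
 * cost of the register reads themselves).
 */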
203  
204  /* Policy operations */
205  
206  static bool __ccu_policy_engine_start(struct ccu_data *ccu, bool sync)
207  {
208  	struct bcm_policy_ctl *control = &ccu->policy.control;
209  	u32 offset;
210  	u32 go_bit;
211  	u32 mask;
212  	bool ret;
213  
214  	/* If we don't need to control policy for this CCU, we're done. */
215  	if (!policy_ctl_exists(control))
216  		return true;
217  
218  	offset = control->offset;
219  	go_bit = control->go_bit;
220  
221  	/* Ensure we're not busy before we start */
222  	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
223  	if (!ret) {
224  		pr_err("%s: ccu %s policy engine wouldn't go idle\n",
225  			__func__, ccu->name);
226  		return false;
227  	}
228  
229  	/*
230  	 * If it's a synchronous request, we'll wait for the voltage
231  	 * and frequency of the active load to stabilize before
232  	 * returning.  To do this we select the active load by
233  	 * setting the ATL bit.
234  	 *
235  	 * An asynchronous request instead ramps the voltage in the
236  	 * background, and when that process stabilizes, the target
237  	 * load is copied to the active load and the CCU frequency
238  	 * is switched.  We do this by selecting the target load
239  	 * (ATL bit clear) and setting the request auto-copy (AC bit
240  	 * set).
241  	 *
242  	 * Note, we do NOT read-modify-write this register.
243  	 */
244  	mask = (u32)1 << go_bit;
245  	if (sync)
246  		mask |= 1 << control->atl_bit;
247  	else
248  		mask |= 1 << control->ac_bit;
249  	__ccu_write(ccu, offset, mask);
250  
251  	/* Wait for indication that operation is complete. */
252  	ret = __ccu_wait_bit(ccu, offset, go_bit, false);
253  	if (!ret)
254  		pr_err("%s: ccu %s policy engine never started\n",
255  			__func__, ccu->name);
256  
257  	return ret;
258  }
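
/*
 * In other words, the single register write above is either
 * (1 << go_bit) | (1 << atl_bit) for a synchronous request, or
 * (1 << go_bit) | (1 << ac_bit) for an asynchronous one; completion is
 * signalled by the hardware clearing the GO bit again.
 */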
259  
260  static bool __ccu_policy_engine_stop(struct ccu_data *ccu)
261  {
262  	struct bcm_lvm_en *enable = &ccu->policy.enable;
263  	u32 offset;
264  	u32 enable_bit;
265  	bool ret;
266  
267  	/* If we don't need to control policy for this CCU, we're done. */
268  	if (!policy_lvm_en_exists(enable))
269  		return true;
270  
271  	/* Ensure we're not busy before we start */
272  	offset = enable->offset;
273  	enable_bit = enable->bit;
274  	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
275  	if (!ret) {
276  		pr_err("%s: ccu %s policy engine already stopped\n",
277  			__func__, ccu->name);
278  		return false;
279  	}
280  
281  	/* Now set the bit to stop the engine (NO read-modify-write) */
282  	__ccu_write(ccu, offset, (u32)1 << enable_bit);
283  
284  	/* Wait for indication that it has stopped. */
285  	ret = __ccu_wait_bit(ccu, offset, enable_bit, false);
286  	if (!ret)
287  		pr_err("%s: ccu %s policy engine never stopped\n",
288  			__func__, ccu->name);
289  
290  	return ret;
291  }
292  
293  /*
294   * A CCU has four operating conditions ("policies"), and some clocks
295   * can be disabled or enabled based on which policy is currently in
296   * effect.  Such clocks have a bit in a "policy mask" register for
297   * each policy indicating whether the clock is enabled for that
298   * policy or not.  The bit position for a clock is the same for all
299   * four registers, and the 32-bit registers are at consecutive
300   * addresses.
301   */
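
/*
 * Layout example (illustrative): if a clock's policy mask bit is bit N
 * and the first policy mask register is at offset P, then policies 0-3
 * use bit N of the registers at offsets P, P+4, P+8 and P+0xc
 * respectively, which is why policy_init() below simply advances the
 * offset by sizeof(u32) on each loop iteration.
 */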
302  static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy)
303  {
304  	u32 offset;
305  	u32 mask;
306  	int i;
307  	bool ret;
308  
309  	if (!policy_exists(policy))
310  		return true;
311  
312  	/*
313  	 * We need to stop the CCU policy engine to allow update
314  	 * of our policy bits.
315  	 */
316  	if (!__ccu_policy_engine_stop(ccu)) {
317  		pr_err("%s: unable to stop CCU %s policy engine\n",
318  			__func__, ccu->name);
319  		return false;
320  	}
321  
322  	/*
323  	 * For now, if a clock defines its policy bit we just mark
324  	 * it "enabled" for all four policies.
325  	 */
326  	offset = policy->offset;
327  	mask = (u32)1 << policy->bit;
328  	for (i = 0; i < CCU_POLICY_COUNT; i++) {
329  		u32 reg_val;
330  
331  		reg_val = __ccu_read(ccu, offset);
332  		reg_val |= mask;
333  		__ccu_write(ccu, offset, reg_val);
334  		offset += sizeof(u32);
335  	}
336  
337  	/* We're done updating; fire up the policy engine again. */
338  	ret = __ccu_policy_engine_start(ccu, true);
339  	if (!ret)
340  		pr_err("%s: unable to restart CCU %s policy engine\n",
341  			__func__, ccu->name);
342  
343  	return ret;
344  }
345  
346  /* Gate operations */
347  
348  /* Determine whether a clock is gated.  CCU lock must be held.  */
349  static bool
350  __is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
351  {
352  	u32 bit_mask;
353  	u32 reg_val;
354  
355  	/* If there is no gate we can assume it's enabled. */
356  	if (!gate_exists(gate))
357  		return true;
358  
359  	bit_mask = 1 << gate->status_bit;
360  	reg_val = __ccu_read(ccu, gate->offset);
361  
362  	return (reg_val & bit_mask) != 0;
363  }
364  
365  /* Determine whether a clock is gated. */
366  static bool
367  is_clk_gate_enabled(struct ccu_data *ccu, struct bcm_clk_gate *gate)
368  {
369  	long flags;
370  	bool ret;
371  
372  	/* Avoid taking the lock if we can */
373  	if (!gate_exists(gate))
374  		return true;
375  
376  	flags = ccu_lock(ccu);
377  	ret = __is_clk_gate_enabled(ccu, gate);
378  	ccu_unlock(ccu, flags);
379  
380  	return ret;
381  }
382  
383  /*
384   * Commit our desired gate state to the hardware.
385   * Returns true if successful, false otherwise.
386   */
387  static bool
388  __gate_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate)
389  {
390  	u32 reg_val;
391  	u32 mask;
392  	bool enabled = false;
393  
394  	BUG_ON(!gate_exists(gate));
395  	if (!gate_is_sw_controllable(gate))
396  		return true;		/* Nothing we can change */
397  
398  	reg_val = __ccu_read(ccu, gate->offset);
399  
400  	/* For a hardware/software gate, set which is in control */
401  	if (gate_is_hw_controllable(gate)) {
402  		mask = (u32)1 << gate->hw_sw_sel_bit;
403  		if (gate_is_sw_managed(gate))
404  			reg_val |= mask;
405  		else
406  			reg_val &= ~mask;
407  	}
408  
409  	/*
410  	 * If software is in control, enable or disable the gate.
411  	 * If hardware is, clear the enabled bit for good measure.
412  	 * If a software controlled gate can't be disabled, we're
413  	 * required to write a 0 into the enable bit (but the gate
414  	 * will be enabled).
415  	 */
416  	mask = (u32)1 << gate->en_bit;
417  	if (gate_is_sw_managed(gate) && (enabled = gate_is_enabled(gate)) &&
418  			!gate_is_no_disable(gate))
419  		reg_val |= mask;
420  	else
421  		reg_val &= ~mask;
422  
423  	__ccu_write(ccu, gate->offset, reg_val);
424  
425  	/* For a hardware controlled gate, we're done */
426  	if (!gate_is_sw_managed(gate))
427  		return true;
428  
429  	/* Otherwise wait for the gate to be in desired state */
430  	return __ccu_wait_bit(ccu, gate->offset, gate->status_bit, enabled);
431  }
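
/*
 * Summary of what __gate_commit() writes (derived from the code above):
 * the hw_sw_sel bit is set when software manages the gate and cleared
 * when hardware does; the enable bit is written as 1 only when the gate
 * is software managed, currently enabled, and not a "no disable" gate.
 * In every other case a 0 is written, and for hardware-managed gates no
 * status polling is done afterwards.
 */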
432  
433  /*
434   * Initialize a gate.  Our desired state (hardware/software select,
435   * and if software, its enable state) is committed to hardware
436   * without the usual checks to see if it's already set up that way.
437   * Returns true if successful, false otherwise.
438   */
439  static bool gate_init(struct ccu_data *ccu, struct bcm_clk_gate *gate)
440  {
441  	if (!gate_exists(gate))
442  		return true;
443  	return __gate_commit(ccu, gate);
444  }
445  
446  /*
447   * Set a gate to enabled or disabled state.  Does nothing if the
448   * gate is not currently under software control, or if it is already
449   * in the requested state.  Returns true if successful, false
450   * otherwise.  CCU lock must be held.
451   */
452  static bool
453  __clk_gate(struct ccu_data *ccu, struct bcm_clk_gate *gate, bool enable)
454  {
455  	bool ret;
456  
457  	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
458  		return true;	/* Nothing to do */
459  
460  	if (!enable && gate_is_no_disable(gate)) {
461  		pr_warn("%s: invalid gate disable request (ignoring)\n",
462  			__func__);
463  		return true;
464  	}
465  
466  	if (enable == gate_is_enabled(gate))
467  		return true;	/* No change */
468  
469  	gate_flip_enabled(gate);
470  	ret = __gate_commit(ccu, gate);
471  	if (!ret)
472  		gate_flip_enabled(gate);	/* Revert the change */
473  
474  	return ret;
475  }
476  
477  /* Enable or disable a gate.  Returns 0 if successful, -EIO otherwise */
478  static int clk_gate(struct ccu_data *ccu, const char *name,
479  			struct bcm_clk_gate *gate, bool enable)
480  {
481  	unsigned long flags;
482  	bool success;
483  
484  	/*
485  	 * Avoid taking the lock if we can.  We quietly ignore
486  	 * requests to change state that don't make sense.
487  	 */
488  	if (!gate_exists(gate) || !gate_is_sw_managed(gate))
489  		return 0;
490  	if (!enable && gate_is_no_disable(gate))
491  		return 0;
492  
493  	flags = ccu_lock(ccu);
494  	__ccu_write_enable(ccu);
495  
496  	success = __clk_gate(ccu, gate, enable);
497  
498  	__ccu_write_disable(ccu);
499  	ccu_unlock(ccu, flags);
500  
501  	if (success)
502  		return 0;
503  
504  	pr_err("%s: failed to %s gate for %s\n", __func__,
505  		enable ? "enable" : "disable", name);
506  
507  	return -EIO;
508  }
509  
510  /* Hysteresis operations */
511  
512  /*
513   * If a clock gate requires a turn-off delay it will have
514   * "hysteresis" register bits defined.  The first, if set, enables
515   * the delay; and if enabled, the second bit determines whether the
516   * delay is "low" or "high" (1 means high).  For now, if it's
517   * defined for a clock, we set it.
518   */
519  static bool hyst_init(struct ccu_data *ccu, struct bcm_clk_hyst *hyst)
520  {
521  	u32 offset;
522  	u32 reg_val;
523  	u32 mask;
524  
525  	if (!hyst_exists(hyst))
526  		return true;
527  
528  	offset = hyst->offset;
529  	mask = (u32)1 << hyst->en_bit;
530  	mask |= (u32)1 << hyst->val_bit;
531  
532  	reg_val = __ccu_read(ccu, offset);
533  	reg_val |= mask;
534  	__ccu_write(ccu, offset, reg_val);
535  
536  	return true;
537  }
538  
539  /* Trigger operations */
540  
541  /*
542   * Caller must ensure CCU lock is held and access is enabled.
543   * Returns true if successful, false otherwise.
544   */
545  static bool __clk_trigger(struct ccu_data *ccu, struct bcm_clk_trig *trig)
546  {
547  	/* Trigger the clock and wait for it to finish */
548  	__ccu_write(ccu, trig->offset, 1 << trig->bit);
549  
550  	return __ccu_wait_bit(ccu, trig->offset, trig->bit, false);
551  }
552  
553  /* Divider operations */
554  
555  /* Read a divider value and return the scaled divisor it represents. */
556  static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div)
557  {
558  	unsigned long flags;
559  	u32 reg_val;
560  	u32 reg_div;
561  
562  	if (divider_is_fixed(div))
563  		return (u64)div->u.fixed;
564  
565  	flags = ccu_lock(ccu);
566  	reg_val = __ccu_read(ccu, div->u.s.offset);
567  	ccu_unlock(ccu, flags);
568  
569  	/* Extract the full divider field from the register value */
570  	reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width);
571  
572  	/* Return the scaled divisor value it represents */
573  	return scaled_div_value(div, reg_div);
574  }
575  
576  /*
577   * Convert a divider's scaled divisor value into its recorded form
578   * and commit it into the hardware divider register.
579   *
580   * Returns 0 on success.  Returns -EINVAL for invalid arguments.
581   * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
582   */
583  static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
584  			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
585  {
586  	bool enabled;
587  	u32 reg_div;
588  	u32 reg_val;
589  	int ret = 0;
590  
591  	BUG_ON(divider_is_fixed(div));
592  
593  	/*
594  	 * If we're just initializing the divider, and no initial
595  	 * state was defined in the device tree, we just find out
596  	 * what its current value is rather than updating it.
597  	 */
598  	if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) {
599  		reg_val = __ccu_read(ccu, div->u.s.offset);
600  		reg_div = bitfield_extract(reg_val, div->u.s.shift,
601  						div->u.s.width);
602  		div->u.s.scaled_div = scaled_div_value(div, reg_div);
603  
604  		return 0;
605  	}
606  
607  	/* Convert the scaled divisor to the value we need to record */
608  	reg_div = divider(div, div->u.s.scaled_div);
609  
610  	/* Clock needs to be enabled before changing the rate */
611  	enabled = __is_clk_gate_enabled(ccu, gate);
612  	if (!enabled && !__clk_gate(ccu, gate, true)) {
613  		ret = -ENXIO;
614  		goto out;
615  	}
616  
617  	/* Replace the divider value and record the result */
618  	reg_val = __ccu_read(ccu, div->u.s.offset);
619  	reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width,
620  					reg_div);
621  	__ccu_write(ccu, div->u.s.offset, reg_val);
622  
623  	/* If the trigger fails we still want to disable the gate */
624  	if (!__clk_trigger(ccu, trig))
625  		ret = -EIO;
626  
627  	/* Disable the clock again if it was disabled to begin with */
628  	if (!enabled && !__clk_gate(ccu, gate, false))
629  		ret = ret ? ret : -ENXIO;	/* return first error */
630  out:
631  	return ret;
632  }
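
/*
 * So a divider update is: make sure the clock is gated on (enabling it
 * temporarily if necessary), replace the divider field in the register,
 * fire the trigger and wait for it to complete, then restore the
 * original gate state.  The first error encountered is the one reported.
 */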
633  
634  /*
635   * Initialize a divider by committing our desired state to hardware
636   * without the usual checks to see if it's already set up that way.
637   * Returns true if successful, false otherwise.
638   */
639  static bool div_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
640  			struct bcm_clk_div *div, struct bcm_clk_trig *trig)
641  {
642  	if (!divider_exists(div) || divider_is_fixed(div))
643  		return true;
644  	return !__div_commit(ccu, gate, div, trig);
645  }
646  
647  static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
648  			struct bcm_clk_div *div, struct bcm_clk_trig *trig,
649  			u64 scaled_div)
650  {
651  	unsigned long flags;
652  	u64 previous;
653  	int ret;
654  
655  	BUG_ON(divider_is_fixed(div));
656  
657  	previous = div->u.s.scaled_div;
658  	if (previous == scaled_div)
659  		return 0;	/* No change */
660  
661  	div->u.s.scaled_div = scaled_div;
662  
663  	flags = ccu_lock(ccu);
664  	__ccu_write_enable(ccu);
665  
666  	ret = __div_commit(ccu, gate, div, trig);
667  
668  	__ccu_write_disable(ccu);
669  	ccu_unlock(ccu, flags);
670  
671  	if (ret)
672  		div->u.s.scaled_div = previous;		/* Revert the change */
673  
674  	return ret;
675  
676  }
677  
678  /* Common clock rate helpers */
679  
680  /*
681   * Implement the common clock framework recalc_rate method, taking
682   * into account a divider and an optional pre-divider.  The
683   * pre-divider register pointer may be NULL.
684   */
685  static unsigned long clk_recalc_rate(struct ccu_data *ccu,
686  			struct bcm_clk_div *div, struct bcm_clk_div *pre_div,
687  			unsigned long parent_rate)
688  {
689  	u64 scaled_parent_rate;
690  	u64 scaled_div;
691  	u64 result;
692  
693  	if (!divider_exists(div))
694  		return parent_rate;
695  
696  	if (parent_rate > (unsigned long)LONG_MAX)
697  		return 0;	/* actually this would be a caller bug */
698  
699  	/*
700  	 * If there is a pre-divider, divide the scaled parent rate
701  	 * by the pre-divider value first.  In this case--to improve
702  	 * accuracy--scale the parent rate by *both* the pre-divider
703  	 * value and the divider before actually computing the
704  	 * result of the pre-divider.
705  	 *
706  	 * If there's only one divider, just scale the parent rate.
707  	 */
708  	if (pre_div && divider_exists(pre_div)) {
709  		u64 scaled_rate;
710  
711  		scaled_rate = scale_rate(pre_div, parent_rate);
712  		scaled_rate = scale_rate(div, scaled_rate);
713  		scaled_div = divider_read_scaled(ccu, pre_div);
714  		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
715  							scaled_div);
716  	} else  {
717  		scaled_parent_rate = scale_rate(div, parent_rate);
718  	}
719  
720  	/*
721  	 * Get the scaled divisor value, and divide the scaled
722  	 * parent rate by that to determine this clock's resulting
723  	 * rate.
724  	 */
725  	scaled_div = divider_read_scaled(ccu, div);
726  	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, scaled_div);
727  
728  	return (unsigned long)result;
729  }
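
/*
 * Worked example (illustrative, both dividers with frac_width = 3):
 * with parent_rate = 200000000, a pre-divider whose scaled value is 16
 * (divisor 2.0) and a divider whose scaled value is 20 (divisor 2.5):
 *
 *	scaled_rate	   = (200000000 << 3) << 3 = 12800000000
 *	scaled_parent_rate = 12800000000 / 16	   = 800000000
 *	result		   = 800000000 / 20	   = 40000000
 *
 * i.e. 200 MHz / 2.0 / 2.5 = 40 MHz.
 */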
730  
731  /*
732   * Compute the output rate produced when a given parent rate is fed
733   * into two dividers.  The pre-divider can be NULL, and even if it's
734   * non-null it may be nonexistent.  It's also OK for the divider to
735   * be nonexistent, and in that case the pre-divider is also ignored.
736   *
737   * If scaled_div is non-null, it is used to return the scaled divisor
738   * value used by the (downstream) divider to produce that rate.
739   */
740  static long round_rate(struct ccu_data *ccu, struct bcm_clk_div *div,
741  				struct bcm_clk_div *pre_div,
742  				unsigned long rate, unsigned long parent_rate,
743  				u64 *scaled_div)
744  {
745  	u64 scaled_parent_rate;
746  	u64 min_scaled_div;
747  	u64 max_scaled_div;
748  	u64 best_scaled_div;
749  	u64 result;
750  
751  	BUG_ON(!divider_exists(div));
752  	BUG_ON(!rate);
753  	BUG_ON(parent_rate > (u64)LONG_MAX);
754  
755  	/*
756  	 * If there is a pre-divider, divide the scaled parent rate
757  	 * by the pre-divider value first.  In this case--to improve
758  	 * accuracy--scale the parent rate by *both* the pre-divider
759  	 * value and the divider before actually computing the
760  	 * result of the pre-divider.
761  	 *
762  	 * If there's only one divider, just scale the parent rate.
763  	 *
764  	 * For simplicity we treat the pre-divider as fixed (for now).
765  	 */
766  	if (divider_exists(pre_div)) {
767  		u64 scaled_rate;
768  		u64 scaled_pre_div;
769  
770  		scaled_rate = scale_rate(pre_div, parent_rate);
771  		scaled_rate = scale_rate(div, scaled_rate);
772  		scaled_pre_div = divider_read_scaled(ccu, pre_div);
773  		scaled_parent_rate = DIV_ROUND_CLOSEST_ULL(scaled_rate,
774  							scaled_pre_div);
775  	} else {
776  		scaled_parent_rate = scale_rate(div, parent_rate);
777  	}
778  
779  	/*
780  	 * Compute the best possible divider and ensure it is in
781  	 * range.  A fixed divider can't be changed, so just report
782  	 * the best we can do.
783  	 */
784  	if (!divider_is_fixed(div)) {
785  		best_scaled_div = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate,
786  							rate);
787  		min_scaled_div = scaled_div_min(div);
788  		max_scaled_div = scaled_div_max(div);
789  		if (best_scaled_div > max_scaled_div)
790  			best_scaled_div = max_scaled_div;
791  		else if (best_scaled_div < min_scaled_div)
792  			best_scaled_div = min_scaled_div;
793  	} else {
794  		best_scaled_div = divider_read_scaled(ccu, div);
795  	}
796  
797  	/* OK, figure out the resulting rate */
798  	result = DIV_ROUND_CLOSEST_ULL(scaled_parent_rate, best_scaled_div);
799  
800  	if (scaled_div)
801  		*scaled_div = best_scaled_div;
802  
803  	return (long)result;
804  }
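
/*
 * Continuing the example above: asking round_rate() for 48 MHz from a
 * 200 MHz parent (pre-divider 2.0, frac_width 3) gives
 * best_scaled_div = DIV_ROUND_CLOSEST(800000000, 48000000) = 17, so the
 * reported rate is roughly 800000000 / 17 = 47.06 MHz, subject to the
 * divider's min/max clamping.
 */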
805  
806  /* Common clock parent helpers */
807  
808  /*
809   * For a given parent selector (register field) value, find the
810   * index into a selector's parent_sel array that contains it.
811   * Returns the index, or BAD_CLK_INDEX if it's not found.
812   */
813  static u8 parent_index(struct bcm_clk_sel *sel, u8 parent_sel)
814  {
815  	u8 i;
816  
817  	BUG_ON(sel->parent_count > (u32)U8_MAX);
818  	for (i = 0; i < sel->parent_count; i++)
819  		if (sel->parent_sel[i] == parent_sel)
820  			return i;
821  	return BAD_CLK_INDEX;
822  }
823  
824  /*
825   * Fetch the current value of the selector, and translate that into
826   * its corresponding index in the parent array we registered with
827   * the clock framework.
828   *
829   * Returns parent array index that corresponds with the value found,
830   * or BAD_CLK_INDEX if the found value is out of range.
831   */
832  static u8 selector_read_index(struct ccu_data *ccu, struct bcm_clk_sel *sel)
833  {
834  	unsigned long flags;
835  	u32 reg_val;
836  	u32 parent_sel;
837  	u8 index;
838  
839  	/* If there's no selector, there's only one parent */
840  	if (!selector_exists(sel))
841  		return 0;
842  
843  	/* Get the value in the selector register */
844  	flags = ccu_lock(ccu);
845  	reg_val = __ccu_read(ccu, sel->offset);
846  	ccu_unlock(ccu, flags);
847  
848  	parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
849  
850  	/* Look up that selector's parent array index and return it */
851  	index = parent_index(sel, parent_sel);
852  	if (index == BAD_CLK_INDEX)
853  		pr_err("%s: out-of-range parent selector %u (%s 0x%04x)\n",
854  			__func__, parent_sel, ccu->name, sel->offset);
855  
856  	return index;
857  }
858  
859  /*
860   * Commit our desired selector value to the hardware.
861   *
862   * Returns 0 on success.  Returns -EINVAL for invalid arguments.
863   * Returns -ENXIO if gating failed, and -EIO if a trigger failed.
864   */
865  static int
866  __sel_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate,
867  			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
868  {
869  	u32 parent_sel;
870  	u32 reg_val;
871  	bool enabled;
872  	int ret = 0;
873  
874  	BUG_ON(!selector_exists(sel));
875  
876  	/*
877  	 * If we're just initializing the selector, and no initial
878  	 * state was defined in the device tree, we just find out
879  	 * what its current value is rather than updating it.
880  	 */
881  	if (sel->clk_index == BAD_CLK_INDEX) {
882  		u8 index;
883  
884  		reg_val = __ccu_read(ccu, sel->offset);
885  		parent_sel = bitfield_extract(reg_val, sel->shift, sel->width);
886  		index = parent_index(sel, parent_sel);
887  		if (index == BAD_CLK_INDEX)
888  			return -EINVAL;
889  		sel->clk_index = index;
890  
891  		return 0;
892  	}
893  
894  	BUG_ON((u32)sel->clk_index >= sel->parent_count);
895  	parent_sel = sel->parent_sel[sel->clk_index];
896  
897  	/* Clock needs to be enabled before changing the parent */
898  	enabled = __is_clk_gate_enabled(ccu, gate);
899  	if (!enabled && !__clk_gate(ccu, gate, true))
900  		return -ENXIO;
901  
902  	/* Replace the selector value and record the result */
903  	reg_val = __ccu_read(ccu, sel->offset);
904  	reg_val = bitfield_replace(reg_val, sel->shift, sel->width, parent_sel);
905  	__ccu_write(ccu, sel->offset, reg_val);
906  
907  	/* If the trigger fails we still want to disable the gate */
908  	if (!__clk_trigger(ccu, trig))
909  		ret = -EIO;
910  
911  	/* Disable the clock again if it was disabled to begin with */
912  	if (!enabled && !__clk_gate(ccu, gate, false))
913  		ret = ret ? ret : -ENXIO;	/* return first error */
914  
915  	return ret;
916  }
917  
918  /*
919   * Initialize a selector by committing our desired state to hardware
920   * without the usual checks to see if it's already set up that way.
921   * Returns true if successful, false otherwise.
922   */
923  static bool sel_init(struct ccu_data *ccu, struct bcm_clk_gate *gate,
924  			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig)
925  {
926  	if (!selector_exists(sel))
927  		return true;
928  	return !__sel_commit(ccu, gate, sel, trig);
929  }
930  
931  /*
932   * Write a new value into a selector register to switch to a
933   * different parent clock.  Returns 0 on success, or an error code
934   * (from __sel_commit()) otherwise.
935   */
936  static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate,
937  			struct bcm_clk_sel *sel, struct bcm_clk_trig *trig,
938  			u8 index)
939  {
940  	unsigned long flags;
941  	u8 previous;
942  	int ret;
943  
944  	previous = sel->clk_index;
945  	if (previous == index)
946  		return 0;	/* No change */
947  
948  	sel->clk_index = index;
949  
950  	flags = ccu_lock(ccu);
951  	__ccu_write_enable(ccu);
952  
953  	ret = __sel_commit(ccu, gate, sel, trig);
954  
955  	__ccu_write_disable(ccu);
956  	ccu_unlock(ccu, flags);
957  
958  	if (ret)
959  		sel->clk_index = previous;	/* Revert the change */
960  
961  	return ret;
962  }
963  
964  /* Clock operations */
965  
966  static int kona_peri_clk_enable(struct clk_hw *hw)
967  {
968  	struct kona_clk *bcm_clk = to_kona_clk(hw);
969  	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
970  
971  	return clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, true);
972  }
973  
974  static void kona_peri_clk_disable(struct clk_hw *hw)
975  {
976  	struct kona_clk *bcm_clk = to_kona_clk(hw);
977  	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
978  
979  	(void)clk_gate(bcm_clk->ccu, bcm_clk->init_data.name, gate, false);
980  }
981  
982  static int kona_peri_clk_is_enabled(struct clk_hw *hw)
983  {
984  	struct kona_clk *bcm_clk = to_kona_clk(hw);
985  	struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate;
986  
987  	return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 1 : 0;
988  }
989  
990  static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw,
991  			unsigned long parent_rate)
992  {
993  	struct kona_clk *bcm_clk = to_kona_clk(hw);
994  	struct peri_clk_data *data = bcm_clk->u.peri;
995  
996  	return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div,
997  				parent_rate);
998  }
999  
1000  static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1001  			unsigned long *parent_rate)
1002  {
1003  	struct kona_clk *bcm_clk = to_kona_clk(hw);
1004  	struct bcm_clk_div *div = &bcm_clk->u.peri->div;
1005  
1006  	if (!divider_exists(div))
1007  		return clk_hw_get_rate(hw);
1008  
1009  	/* Quietly avoid a zero rate */
1010  	return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div,
1011  				rate ? rate : 1, *parent_rate, NULL);
1012  }
1013  
1014  static int kona_peri_clk_determine_rate(struct clk_hw *hw,
1015  					struct clk_rate_request *req)
1016  {
1017  	struct kona_clk *bcm_clk = to_kona_clk(hw);
1018  	struct clk_hw *current_parent;
1019  	unsigned long parent_rate;
1020  	unsigned long best_delta;
1021  	unsigned long best_rate;
1022  	u32 parent_count;
1023  	long rate;
1024  	u32 which;
1025  
1026  	/*
1027  	 * If there is no other parent to choose, use the current one.
1028  	 * Note:  We don't honor (or use) CLK_SET_RATE_NO_REPARENT.
1029  	 */
1030  	WARN_ON_ONCE(bcm_clk->init_data.flags & CLK_SET_RATE_NO_REPARENT);
1031  	parent_count = (u32)bcm_clk->init_data.num_parents;
1032  	if (parent_count < 2) {
1033  		rate = kona_peri_clk_round_rate(hw, req->rate,
1034  						&req->best_parent_rate);
1035  		if (rate < 0)
1036  			return rate;
1037  
1038  		req->rate = rate;
1039  		return 0;
1040  	}
1041  
1042  	/* Unless we can do better, stick with current parent */
1043  	current_parent = clk_hw_get_parent(hw);
1044  	parent_rate = clk_hw_get_rate(current_parent);
1045  	best_rate = kona_peri_clk_round_rate(hw, req->rate, &parent_rate);
1046  	best_delta = abs(best_rate - req->rate);
1047  
1048  	/* Check whether any other parent clock can produce a better result */
1049  	for (which = 0; which < parent_count; which++) {
1050  		struct clk_hw *parent = clk_hw_get_parent_by_index(hw, which);
1051  		unsigned long delta;
1052  		unsigned long other_rate;
1053  
1054  		BUG_ON(!parent);
1055  		if (parent == current_parent)
1056  			continue;
1057  
1058  		/* We don't support CLK_SET_RATE_PARENT */
1059  		parent_rate = clk_hw_get_rate(parent);
1060  		other_rate = kona_peri_clk_round_rate(hw, req->rate,
1061  						      &parent_rate);
1062  		delta = abs(other_rate - req->rate);
1063  		if (delta < best_delta) {
1064  			best_delta = delta;
1065  			best_rate = other_rate;
1066  			req->best_parent_hw = parent;
1067  			req->best_parent_rate = parent_rate;
1068  		}
1069  	}
1070  
1071  	req->rate = best_rate;
1072  	return 0;
1073  }
1074  
1075  static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index)
1076  {
1077  	struct kona_clk *bcm_clk = to_kona_clk(hw);
1078  	struct peri_clk_data *data = bcm_clk->u.peri;
1079  	struct bcm_clk_sel *sel = &data->sel;
1080  	struct bcm_clk_trig *trig;
1081  	int ret;
1082  
1083  	BUG_ON(index >= sel->parent_count);
1084  
1085  	/* If there's only one parent we don't require a selector */
1086  	if (!selector_exists(sel))
1087  		return 0;
1088  
1089  	/*
1090  	 * The regular trigger is used by default, but if there's a
1091  	 * pre-trigger we want to use that instead.
1092  	 */
1093  	trig = trigger_exists(&data->pre_trig) ? &data->pre_trig
1094  					       : &data->trig;
1095  
1096  	ret = selector_write(bcm_clk->ccu, &data->gate, sel, trig, index);
1097  	if (ret == -ENXIO) {
1098  		pr_err("%s: gating failure for %s\n", __func__,
1099  			bcm_clk->init_data.name);
1100  		ret = -EIO;	/* Don't proliferate weird errors */
1101  	} else if (ret == -EIO) {
1102  		pr_err("%s: %strigger failed for %s\n", __func__,
1103  			trig == &data->pre_trig ? "pre-" : "",
1104  			bcm_clk->init_data.name);
1105  	}
1106  
1107  	return ret;
1108  }
1109  
1110  static u8 kona_peri_clk_get_parent(struct clk_hw *hw)
1111  {
1112  	struct kona_clk *bcm_clk = to_kona_clk(hw);
1113  	struct peri_clk_data *data = bcm_clk->u.peri;
1114  	u8 index;
1115  
1116  	index = selector_read_index(bcm_clk->ccu, &data->sel);
1117  
1118  	/* Not all callers would handle an out-of-range value gracefully */
1119  	return index == BAD_CLK_INDEX ? 0 : index;
1120  }
1121  
1122  static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate,
1123  			unsigned long parent_rate)
1124  {
1125  	struct kona_clk *bcm_clk = to_kona_clk(hw);
1126  	struct peri_clk_data *data = bcm_clk->u.peri;
1127  	struct bcm_clk_div *div = &data->div;
1128  	u64 scaled_div = 0;
1129  	int ret;
1130  
1131  	if (parent_rate > (unsigned long)LONG_MAX)
1132  		return -EINVAL;
1133  
1134  	if (rate == clk_hw_get_rate(hw))
1135  		return 0;
1136  
1137  	if (!divider_exists(div))
1138  		return rate == parent_rate ? 0 : -EINVAL;
1139  
1140  	/*
1141  	 * A fixed divider can't be changed.  (Nor can a fixed
1142  	 * pre-divider be, but for now we never actually try to
1143  	 * change that.)  Tolerate a request for a no-op change.
1144  	 */
1145  	if (divider_is_fixed(&data->div))
1146  		return rate == parent_rate ? 0 : -EINVAL;
1147  
1148  	/*
1149  	 * Get the scaled divisor value needed to achieve a clock
1150  	 * rate as close as possible to what was requested, given
1151  	 * the parent clock rate supplied.
1152  	 */
1153  	(void)round_rate(bcm_clk->ccu, div, &data->pre_div,
1154  				rate ? rate : 1, parent_rate, &scaled_div);
1155  
1156  	/*
1157  	 * We aren't updating any pre-divider at this point, so
1158  	 * we'll use the regular trigger.
1159  	 */
1160  	ret = divider_write(bcm_clk->ccu, &data->gate, &data->div,
1161  				&data->trig, scaled_div);
1162  	if (ret == -ENXIO) {
1163  		pr_err("%s: gating failure for %s\n", __func__,
1164  			bcm_clk->init_data.name);
1165  		ret = -EIO;	/* Don't proliferate weird errors */
1166  	} else if (ret == -EIO) {
1167  		pr_err("%s: trigger failed for %s\n", __func__,
1168  			bcm_clk->init_data.name);
1169  	}
1170  
1171  	return ret;
1172  }
1173  
1174  struct clk_ops kona_peri_clk_ops = {
1175  	.enable = kona_peri_clk_enable,
1176  	.disable = kona_peri_clk_disable,
1177  	.is_enabled = kona_peri_clk_is_enabled,
1178  	.recalc_rate = kona_peri_clk_recalc_rate,
1179  	.determine_rate = kona_peri_clk_determine_rate,
1180  	.set_parent = kona_peri_clk_set_parent,
1181  	.get_parent = kona_peri_clk_get_parent,
1182  	.set_rate = kona_peri_clk_set_rate,
1183  };
1184  
1185  /* Put a peripheral clock into its initial state */
1186  static bool __peri_clk_init(struct kona_clk *bcm_clk)
1187  {
1188  	struct ccu_data *ccu = bcm_clk->ccu;
1189  	struct peri_clk_data *peri = bcm_clk->u.peri;
1190  	const char *name = bcm_clk->init_data.name;
1191  	struct bcm_clk_trig *trig;
1192  
1193  	BUG_ON(bcm_clk->type != bcm_clk_peri);
1194  
1195  	if (!policy_init(ccu, &peri->policy)) {
1196  		pr_err("%s: error initializing policy for %s\n",
1197  			__func__, name);
1198  		return false;
1199  	}
1200  	if (!gate_init(ccu, &peri->gate)) {
1201  		pr_err("%s: error initializing gate for %s\n", __func__, name);
1202  		return false;
1203  	}
1204  	if (!hyst_init(ccu, &peri->hyst)) {
1205  		pr_err("%s: error initializing hyst for %s\n", __func__, name);
1206  		return false;
1207  	}
1208  	if (!div_init(ccu, &peri->gate, &peri->div, &peri->trig)) {
1209  		pr_err("%s: error initializing divider for %s\n", __func__,
1210  			name);
1211  		return false;
1212  	}
1213  
1214  	/*
1215  	 * For the pre-divider and selector, the pre-trigger is used
1216  	 * if it's present, otherwise we just use the regular trigger.
1217  	 */
1218  	trig = trigger_exists(&peri->pre_trig) ? &peri->pre_trig
1219  					       : &peri->trig;
1220  
1221  	if (!div_init(ccu, &peri->gate, &peri->pre_div, trig)) {
1222  		pr_err("%s: error initializing pre-divider for %s\n", __func__,
1223  			name);
1224  		return false;
1225  	}
1226  
1227  	if (!sel_init(ccu, &peri->gate, &peri->sel, trig)) {
1228  		pr_err("%s: error initializing selector for %s\n", __func__,
1229  			name);
1230  		return false;
1231  	}
1232  
1233  	return true;
1234  }
1235  
1236  static bool __kona_clk_init(struct kona_clk *bcm_clk)
1237  {
1238  	switch (bcm_clk->type) {
1239  	case bcm_clk_peri:
1240  		return __peri_clk_init(bcm_clk);
1241  	default:
1242  		BUG();
1243  	}
1244  	return false;
1245  }
1246  
1247  /* Set a CCU and all its clocks into their desired initial state */
1248  bool __init kona_ccu_init(struct ccu_data *ccu)
1249  {
1250  	unsigned long flags;
1251  	unsigned int which;
1252  	struct kona_clk *kona_clks = ccu->kona_clks;
1253  	bool success = true;
1254  
1255  	flags = ccu_lock(ccu);
1256  	__ccu_write_enable(ccu);
1257  
1258  	for (which = 0; which < ccu->clk_num; which++) {
1259  		struct kona_clk *bcm_clk = &kona_clks[which];
1260  
1261  		if (!bcm_clk->ccu)
1262  			continue;
1263  
1264  		success &= __kona_clk_init(bcm_clk);
1265  	}
1266  
1267  	__ccu_write_disable(ccu);
1268  	ccu_unlock(ccu, flags);
1269  	return success;
1270  }
1271