// SPDX-License-Identifier: GPL-2.0
/*
 * StarFive JH71X0 Clock Generator Driver
 *
 * Copyright (C) 2021-2022 Emil Renner Berthing <kernel@esmil.dk>
 */
7 
8 #include <linux/clk-provider.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/io.h>
12 
13 #include "clk-starfive-jh71x0.h"
14 
jh71x0_clk_from(struct clk_hw * hw)15 static struct jh71x0_clk *jh71x0_clk_from(struct clk_hw *hw)
16 {
17 	return container_of(hw, struct jh71x0_clk, hw);
18 }
19 
jh71x0_priv_from(struct jh71x0_clk * clk)20 static struct jh71x0_clk_priv *jh71x0_priv_from(struct jh71x0_clk *clk)
21 {
22 	return container_of(clk, struct jh71x0_clk_priv, reg[clk->idx]);
23 }
24 
jh71x0_clk_reg_get(struct jh71x0_clk * clk)25 static u32 jh71x0_clk_reg_get(struct jh71x0_clk *clk)
26 {
27 	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
28 	void __iomem *reg = priv->base + 4 * clk->idx;
29 
30 	return readl_relaxed(reg);
31 }
32 
/*
 * Read-modify-write the bits selected by @mask in this clock's control
 * register to @value. The shared rmw_lock serializes updates so concurrent
 * writers to different fields of the same register do not race.
 */
static void jh71x0_clk_reg_rmw(struct jh71x0_clk *clk, u32 mask, u32 value)
{
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	void __iomem *reg = priv->base + 4 * clk->idx;
	unsigned long flags;
	u32 old;

	spin_lock_irqsave(&priv->rmw_lock, flags);
	old = readl_relaxed(reg);
	writel_relaxed((old & ~mask) | value, reg);
	spin_unlock_irqrestore(&priv->rmw_lock, flags);
}
44 
jh71x0_clk_enable(struct clk_hw * hw)45 static int jh71x0_clk_enable(struct clk_hw *hw)
46 {
47 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
48 
49 	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, JH71X0_CLK_ENABLE);
50 	return 0;
51 }
52 
jh71x0_clk_disable(struct clk_hw * hw)53 static void jh71x0_clk_disable(struct clk_hw *hw)
54 {
55 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
56 
57 	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_ENABLE, 0);
58 }
59 
jh71x0_clk_is_enabled(struct clk_hw * hw)60 static int jh71x0_clk_is_enabled(struct clk_hw *hw)
61 {
62 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
63 
64 	return !!(jh71x0_clk_reg_get(clk) & JH71X0_CLK_ENABLE);
65 }
66 
jh71x0_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)67 static unsigned long jh71x0_clk_recalc_rate(struct clk_hw *hw,
68 					    unsigned long parent_rate)
69 {
70 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
71 	u32 div = jh71x0_clk_reg_get(clk) & JH71X0_CLK_DIV_MASK;
72 
73 	return div ? parent_rate / div : 0;
74 }
75 
/*
 * clk_ops .determine_rate for integer dividers: choose
 * div = min(ceil(parent / rate), max_div) and report the achievable rate
 * back to the clk framework in req->rate.
 */
static int jh71x0_clk_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	unsigned long parent = req->best_parent_rate;
	/* target rate, pre-clamped to the framework's allowed window */
	unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
	unsigned long div = min_t(unsigned long, DIV_ROUND_UP(parent, rate), clk->max_div);
	unsigned long result = parent / div;

	/*
	 * we want the result clamped by min_rate and max_rate if possible:
	 * case 1: div hits the max divider value, which means it's less than
	 * parent / rate, so the result is greater than rate and min_rate in
	 * particular. we can't do anything about result > max_rate because the
	 * divider doesn't go any further.
	 * case 2: div = DIV_ROUND_UP(parent, rate) which means the result is
	 * always lower or equal to rate and max_rate. however the result may
	 * turn out lower than min_rate, but then the next higher rate is fine:
	 *   div - 1 = ceil(parent / rate) - 1 < parent / rate
	 * and thus
	 *   min_rate <= rate < parent / (div - 1)
	 */
	if (result < req->min_rate && div > 1)
		result = parent / (div - 1);

	req->rate = result;
	return 0;
}
104 
jh71x0_clk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)105 static int jh71x0_clk_set_rate(struct clk_hw *hw,
106 			       unsigned long rate,
107 			       unsigned long parent_rate)
108 {
109 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
110 	unsigned long div = clamp(DIV_ROUND_CLOSEST(parent_rate, rate),
111 				  1UL, (unsigned long)clk->max_div);
112 
113 	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, div);
114 	return 0;
115 }
116 
jh71x0_clk_frac_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)117 static unsigned long jh71x0_clk_frac_recalc_rate(struct clk_hw *hw,
118 						 unsigned long parent_rate)
119 {
120 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
121 	u32 reg = jh71x0_clk_reg_get(clk);
122 	unsigned long div100 = 100 * (reg & JH71X0_CLK_INT_MASK) +
123 			       ((reg & JH71X0_CLK_FRAC_MASK) >> JH71X0_CLK_FRAC_SHIFT);
124 
125 	return (div100 >= JH71X0_CLK_FRAC_MIN) ? 100 * parent_rate / div100 : 0;
126 }
127 
/*
 * clk_ops .determine_rate for the fractional divider. As in
 * jh71x0_clk_frac_recalc_rate(), the divider is scaled by 100
 * (div100 = 100 * divider) so the x.yz fixed-point value can be handled
 * with integer arithmetic; the result is then nudged back inside
 * [min_rate, max_rate] where the divider range allows, mirroring the
 * clamping argument in jh71x0_clk_determine_rate().
 */
static int jh71x0_clk_frac_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
{
	unsigned long parent100 = 100 * req->best_parent_rate;
	unsigned long rate = clamp(req->rate, req->min_rate, req->max_rate);
	unsigned long div100 = clamp(DIV_ROUND_CLOSEST(parent100, rate),
				     JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
	unsigned long result = parent100 / div100;

	/* clamp the result as in jh71x0_clk_determine_rate() above */
	if (result > req->max_rate && div100 < JH71X0_CLK_FRAC_MAX)
		result = parent100 / (div100 + 1);
	if (result < req->min_rate && div100 > JH71X0_CLK_FRAC_MIN)
		result = parent100 / (div100 - 1);

	req->rate = result;
	return 0;
}
146 
jh71x0_clk_frac_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)147 static int jh71x0_clk_frac_set_rate(struct clk_hw *hw,
148 				    unsigned long rate,
149 				    unsigned long parent_rate)
150 {
151 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
152 	unsigned long div100 = clamp(DIV_ROUND_CLOSEST(100 * parent_rate, rate),
153 				     JH71X0_CLK_FRAC_MIN, JH71X0_CLK_FRAC_MAX);
154 	u32 value = ((div100 % 100) << JH71X0_CLK_FRAC_SHIFT) | (div100 / 100);
155 
156 	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_DIV_MASK, value);
157 	return 0;
158 }
159 
jh71x0_clk_get_parent(struct clk_hw * hw)160 static u8 jh71x0_clk_get_parent(struct clk_hw *hw)
161 {
162 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
163 	u32 value = jh71x0_clk_reg_get(clk);
164 
165 	return (value & JH71X0_CLK_MUX_MASK) >> JH71X0_CLK_MUX_SHIFT;
166 }
167 
/* clk_ops .set_parent: program the mux field with the parent index. */
static int jh71x0_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);

	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_MUX_MASK,
			   (u32)index << JH71X0_CLK_MUX_SHIFT);
	return 0;
}
176 
jh71x0_clk_get_phase(struct clk_hw * hw)177 static int jh71x0_clk_get_phase(struct clk_hw *hw)
178 {
179 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
180 	u32 value = jh71x0_clk_reg_get(clk);
181 
182 	return (value & JH71X0_CLK_INVERT) ? 180 : 0;
183 }
184 
jh71x0_clk_set_phase(struct clk_hw * hw,int degrees)185 static int jh71x0_clk_set_phase(struct clk_hw *hw, int degrees)
186 {
187 	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
188 	u32 value;
189 
190 	if (degrees == 0)
191 		value = 0;
192 	else if (degrees == 180)
193 		value = JH71X0_CLK_INVERT;
194 	else
195 		return -EINVAL;
196 
197 	jh71x0_clk_reg_rmw(clk, JH71X0_CLK_INVERT, value);
198 	return 0;
199 }
200 
#ifdef CONFIG_DEBUG_FS
/*
 * clk_ops .debug_init: expose this clock's control register as a read-only
 * "registers" file in the clock's debugfs directory.
 */
static void jh71x0_clk_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	static const struct debugfs_reg32 jh71x0_clk_reg = {
		.name = "CTRL",
		.offset = 0,
	};
	struct jh71x0_clk *clk = jh71x0_clk_from(hw);
	struct jh71x0_clk_priv *priv = jh71x0_priv_from(clk);
	struct debugfs_regset32 *regset;

	/* devm allocation: released automatically with the clock device */
	regset = devm_kzalloc(priv->dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return; /* debugfs is best-effort; silently skip on OOM */

	regset->regs = &jh71x0_clk_reg;
	regset->nregs = 1;
	regset->base = priv->base + 4 * clk->idx;

	debugfs_create_regset32("registers", 0400, dentry, regset);
}
#else
/* without DEBUG_FS the clk core accepts a NULL .debug_init */
#define jh71x0_clk_debug_init NULL
#endif
225 
/*
 * clk_ops tables, one per combination of gate, divider (integer or
 * fractional) and mux capability; the right table for a clock is selected
 * by starfive_jh71x0_clk_ops() below.
 */

/* gate only */
static const struct clk_ops jh71x0_clk_gate_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.debug_init = jh71x0_clk_debug_init,
};

/* integer divider only */
static const struct clk_ops jh71x0_clk_div_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* fractional divider only */
static const struct clk_ops jh71x0_clk_fdiv_ops = {
	.recalc_rate = jh71x0_clk_frac_recalc_rate,
	.determine_rate = jh71x0_clk_frac_determine_rate,
	.set_rate = jh71x0_clk_frac_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* gate + integer divider */
static const struct clk_ops jh71x0_clk_gdiv_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* mux only */
static const struct clk_ops jh71x0_clk_mux_ops = {
	.determine_rate = __clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

/* gate + mux */
static const struct clk_ops jh71x0_clk_gmux_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.determine_rate = __clk_mux_determine_rate,
	.set_parent = jh71x0_clk_set_parent,
	.get_parent = jh71x0_clk_get_parent,
	.debug_init = jh71x0_clk_debug_init,
};

/* mux + integer divider */
static const struct clk_ops jh71x0_clk_mdiv_ops = {
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* gate + mux + integer divider */
static const struct clk_ops jh71x0_clk_gmd_ops = {
	.enable = jh71x0_clk_enable,
	.disable = jh71x0_clk_disable,
	.is_enabled = jh71x0_clk_is_enabled,
	.recalc_rate = jh71x0_clk_recalc_rate,
	.determine_rate = jh71x0_clk_determine_rate,
	.get_parent = jh71x0_clk_get_parent,
	.set_parent = jh71x0_clk_set_parent,
	.set_rate = jh71x0_clk_set_rate,
	.debug_init = jh71x0_clk_debug_init,
};

/* no gate/div/mux capability: only phase inversion */
static const struct clk_ops jh71x0_clk_inv_ops = {
	.get_phase = jh71x0_clk_get_phase,
	.set_phase = jh71x0_clk_set_phase,
	.debug_init = jh71x0_clk_debug_init,
};
300 
starfive_jh71x0_clk_ops(u32 max)301 const struct clk_ops *starfive_jh71x0_clk_ops(u32 max)
302 {
303 	if (max & JH71X0_CLK_DIV_MASK) {
304 		if (max & JH71X0_CLK_MUX_MASK) {
305 			if (max & JH71X0_CLK_ENABLE)
306 				return &jh71x0_clk_gmd_ops;
307 			return &jh71x0_clk_mdiv_ops;
308 		}
309 		if (max & JH71X0_CLK_ENABLE)
310 			return &jh71x0_clk_gdiv_ops;
311 		if (max == JH71X0_CLK_FRAC_MAX)
312 			return &jh71x0_clk_fdiv_ops;
313 		return &jh71x0_clk_div_ops;
314 	}
315 
316 	if (max & JH71X0_CLK_MUX_MASK) {
317 		if (max & JH71X0_CLK_ENABLE)
318 			return &jh71x0_clk_gmux_ops;
319 		return &jh71x0_clk_mux_ops;
320 	}
321 
322 	if (max & JH71X0_CLK_ENABLE)
323 		return &jh71x0_clk_gate_ops;
324 
325 	return &jh71x0_clk_inv_ops;
326 }
327 EXPORT_SYMBOL_GPL(starfive_jh71x0_clk_ops);
328