// SPDX-License-Identifier: GPL-2.0-only
/*
 * mmp mix (div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"

/*
 * The mix clock is a clock that combines a mux and a divider.
 * Because the div field and the mux field need to be updated at
 * the same time, it cannot be split into two separate clock types.
 */

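/*
 * Illustrative example (numbers are made up, not taken from any real
 * platform): if the mux selects a 797 MHz parent and the divider field
 * is programmed to 4, the mix clock outputs 797000000 / 4 = 199250000 Hz.
 * _set_rate() below writes both fields in a single register update.
 */
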
#define to_clk_mix(hw)	container_of(hw, struct mmp_clk_mix, hw)

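/* Largest divider the div field can express, honouring div_flags/div_table. */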
static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
{
	unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
	unsigned int maxdiv = 0;
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div_mask;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << div_mask;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div > maxdiv)
				maxdiv = clkt->div;
		return maxdiv;
	}
	return div_mask + 1;
}

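/* Convert a raw value read from the div field into the actual divider. */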
static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return val;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return 1 << val;
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->val == val)
				return clkt->div;
		if (clkt->div == 0)
			return 0;
	}
	return val + 1;
}

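/* Convert a raw value read from the mux field into a parent index. */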
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
	int num_parents = clk_hw_get_num_parents(&mix->hw);
	int i;

	if (mix->mux_flags & CLK_MUX_INDEX_BIT)
		return ffs(val) - 1;
	if (mix->mux_flags & CLK_MUX_INDEX_ONE)
		return val - 1;
	if (mix->mux_table) {
		for (i = 0; i < num_parents; i++)
			if (mix->mux_table[i] == val)
				return i;
		if (i == num_parents)
			return 0;
	}

	return val;
}

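/* Convert a divider into the raw value to program into the div field. */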
static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
{
	struct clk_div_table *clkt;

	if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
		return div;
	if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
		return __ffs(div);
	if (mix->div_table) {
		for (clkt = mix->div_table; clkt->div; clkt++)
			if (clkt->div == div)
				return clkt->val;
		if (clkt->div == 0)
			return 0;
	}

	return div - 1;
}

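/* Convert a parent index into the raw value to program into the mux field. */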
static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
{
	if (mix->mux_table)
		return mix->mux_table[mux];

	return mux;
}

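/*
 * Precompute the divisor for every table entry and invalidate entries
 * whose rate cannot be derived exactly from their parent's rate.
 */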
static void _filter_clk_table(struct mmp_clk_mix *mix,
				struct mmp_clk_mix_clk_table *table,
				unsigned int table_size)
{
	int i;
	struct mmp_clk_mix_clk_table *item;
	struct clk_hw *parent, *hw;
	unsigned long parent_rate;

	hw = &mix->hw;

	for (i = 0; i < table_size; i++) {
		item = &table[i];
		parent = clk_hw_get_parent_by_index(hw, item->parent_index);
		parent_rate = clk_hw_get_rate(parent);
		if (parent_rate % item->rate) {
			item->valid = 0;
		} else {
			item->divisor = parent_rate / item->rate;
			item->valid = 1;
		}
	}
}

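/*
 * Program the mux and/or div fields under the optional spinlock.  V1
 * simply writes the control register; V2 additionally sets the
 * frequency-change bit and polls until the hardware clears it; V3 raises
 * the frequency-change bit in the control register and writes the new
 * mux/div value to the separate select register.
 */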
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
			unsigned int change_mux, unsigned int change_div)
{
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	u8 width, shift;
	u32 mux_div, fc_req;
	int ret, timeout = 50;
	unsigned long flags = 0;

	if (!change_mux && !change_div)
		return -EINVAL;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (change_div) {
		width = ri->width_div;
		shift = ri->shift_div;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
	}

	if (change_mux) {
		width = ri->width_mux;
		shift = ri->shift_mux;
		mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
		mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
	}

	if (mix->type == MMP_CLK_MIX_TYPE_V1) {
		writel(mux_div, ri->reg_clk_ctrl);
	} else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
		mux_div |= (1 << ri->bit_fc);
		writel(mux_div, ri->reg_clk_ctrl);

		do {
			fc_req = readl(ri->reg_clk_ctrl);
			timeout--;
			if (!(fc_req & (1 << ri->bit_fc)))
				break;
		} while (timeout);

		if (timeout == 0) {
			pr_err("%s:%s cannot do frequency change\n",
				__func__, clk_hw_get_name(&mix->hw));
			ret = -EBUSY;
			goto error;
		}
	} else {
		fc_req = readl(ri->reg_clk_ctrl);
		fc_req |= 1 << ri->bit_fc;
		writel(fc_req, ri->reg_clk_ctrl);
		writel(mux_div, ri->reg_clk_sel);
		fc_req &= ~(1 << ri->bit_fc);
	}

	ret = 0;
error:
	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	return ret;
}

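/*
 * Choose the parent/divider combination whose output rate is closest to
 * the requested rate, using the pre-filtered table when available or by
 * scanning every parent and divider value otherwise.
 */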
static int mmp_clk_mix_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	struct clk_hw *parent, *parent_best;
	unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
	unsigned long gap, gap_best;
	u32 div_val_max;
	unsigned int div;
	int i, j;

	mix_rate_best = 0;
	parent_rate_best = 0;
	gap_best = ULONG_MAX;
	parent_best = NULL;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_hw_get_parent_by_index(hw,
							item->parent_index);
			parent_rate = clk_hw_get_rate(parent);
			mix_rate = parent_rate / item->divisor;
			gap = abs(mix_rate - req->rate);
			if (!parent_best || gap < gap_best) {
				parent_best = parent;
				parent_rate_best = parent_rate;
				mix_rate_best = mix_rate;
				gap_best = gap;
				if (gap_best == 0)
					goto found;
			}
		}
	} else {
		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
			parent = clk_hw_get_parent_by_index(hw, i);
			parent_rate = clk_hw_get_rate(parent);
			div_val_max = _get_maxdiv(mix);
			for (j = 0; j < div_val_max; j++) {
				div = _get_div(mix, j);
				mix_rate = parent_rate / div;
				gap = abs(mix_rate - req->rate);
				if (!parent_best || gap < gap_best) {
					parent_best = parent;
					parent_rate_best = parent_rate;
					mix_rate_best = mix_rate;
					gap_best = gap;
					if (gap_best == 0)
						goto found;
				}
			}
		}
	}

found:
	if (!parent_best)
		return -EINVAL;

	req->best_parent_rate = parent_rate_best;
	req->best_parent_hw = parent_best;
	req->rate = mix_rate_best;

	return 0;
}

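/* Change parent and divider together in one frequency-change sequence. */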
static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
						unsigned long rate,
						unsigned long parent_rate,
						u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	unsigned int div;
	u32 div_val, mux_val;

	div = parent_rate / rate;
	div_val = _get_div_val(mix, div);
	mux_val = _get_mux_val(mix, index);

	return _set_rate(mix, mux_val, div_val, 1, 1);
}

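/* Read back the currently selected parent index from the hardware. */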
static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	u32 mux_val;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_mux;
	shift = mix->reg_info.shift_mux;

	mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);

	return _get_mux(mix, mux_val);
}

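/* Derive the current output rate from the parent rate and the div field. */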
static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
	unsigned long flags = 0;
	u32 mux_div = 0;
	u8 width, shift;
	unsigned int div;

	if (mix->lock)
		spin_lock_irqsave(mix->lock, flags);

	if (mix->type == MMP_CLK_MIX_TYPE_V1
		|| mix->type == MMP_CLK_MIX_TYPE_V2)
		mux_div = readl(ri->reg_clk_ctrl);
	else
		mux_div = readl(ri->reg_clk_sel);

	if (mix->lock)
		spin_unlock_irqrestore(mix->lock, flags);

	width = mix->reg_info.width_div;
	shift = mix->reg_info.shift_div;

	div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));

	return parent_rate / div;
}

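/*
 * Switch to a new parent.  With a clock table, the divisor recorded for
 * that parent is applied as well; otherwise only the mux field changes.
 */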
static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	int i;
	u32 div_val, mux_val;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			if (item->parent_index == index)
				break;
		}
		if (i < mix->table_size) {
			div_val = _get_div_val(mix, item->divisor);
			mux_val = _get_mux_val(mix, item->parent_index);
		} else
			return -EINVAL;
	} else {
		mux_val = _get_mux_val(mix, index);
		div_val = 0;
	}

	return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
}

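/*
 * Apply a new rate by finding the parent/divisor pair that matches the
 * best_parent_rate previously chosen by mmp_clk_mix_determine_rate().
 */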
static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long best_parent_rate)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);
	struct mmp_clk_mix_clk_table *item;
	unsigned long parent_rate;
	unsigned int best_divisor;
	struct clk_hw *parent;
	int i;

	best_divisor = best_parent_rate / rate;

	if (mix->table) {
		for (i = 0; i < mix->table_size; i++) {
			item = &mix->table[i];
			if (item->valid == 0)
				continue;
			parent = clk_hw_get_parent_by_index(hw,
							item->parent_index);
			parent_rate = clk_hw_get_rate(parent);
			if (parent_rate == best_parent_rate
				&& item->divisor == best_divisor)
				break;
		}
		if (i < mix->table_size)
			return _set_rate(mix,
					_get_mux_val(mix, item->parent_index),
					_get_div_val(mix, item->divisor),
					1, 1);
		else
			return -EINVAL;
	} else {
		for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
			parent = clk_hw_get_parent_by_index(hw, i);
			parent_rate = clk_hw_get_rate(parent);
			if (parent_rate == best_parent_rate)
				break;
		}
		if (i < clk_hw_get_num_parents(hw))
			return _set_rate(mix, _get_mux_val(mix, i),
					_get_div_val(mix, best_divisor), 1, 1);
		else
			return -EINVAL;
	}
}

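/* Validate the optional clock table once when the clock is registered. */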
static int mmp_clk_mix_init(struct clk_hw *hw)
{
	struct mmp_clk_mix *mix = to_clk_mix(hw);

	if (mix->table)
		_filter_clk_table(mix, mix->table, mix->table_size);

	return 0;
}

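/* clk_ops for the combined mux + divider (mix) clock. */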
const struct clk_ops mmp_clk_mix_ops = {
	.determine_rate = mmp_clk_mix_determine_rate,
	.set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
	.set_rate = mmp_clk_set_rate,
	.set_parent = mmp_clk_set_parent,
	.get_parent = mmp_clk_mix_get_parent,
	.recalc_rate = mmp_clk_mix_recalc_rate,
	.init = mmp_clk_mix_init,
};

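/*
 * mmp_clk_register_mix() - register a combined mux/divider (mix) clock.
 *
 * A minimal usage sketch is shown below.  It is illustrative only: the
 * clock name, parent names, apmu_base register address and the field
 * widths/shifts are assumptions made up for this example, not values
 * from a real platform.  Setting bit_fc to 32 or more selects the V1
 * variant with no frequency-change handshake.
 *
 *	static const char * const parents[] = { "pll1_d2", "pll1_d4" };
 *	static DEFINE_SPINLOCK(mix_lock);
 *	struct mmp_clk_mix_config cfg = {
 *		.reg_info = {
 *			.reg_clk_ctrl = apmu_base + 0x50,
 *			.width_div = 3,
 *			.shift_div = 8,
 *			.width_mux = 2,
 *			.shift_mux = 5,
 *			.bit_fc = 32,
 *		},
 *	};
 *	struct clk *clk;
 *
 *	clk = mmp_clk_register_mix(NULL, "example_mix", parents,
 *				   ARRAY_SIZE(parents), 0, &cfg, &mix_lock);
 *	if (IS_ERR(clk))
 *		pr_err("failed to register example_mix\n");
 */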
struct clk *mmp_clk_register_mix(struct device *dev,
					const char *name,
					const char * const *parent_names,
					u8 num_parents,
					unsigned long flags,
					struct mmp_clk_mix_config *config,
					spinlock_t *lock)
{
	struct mmp_clk_mix *mix;
	struct clk *clk;
	struct clk_init_data init;
	size_t table_bytes;

	mix = kzalloc(sizeof(*mix), GFP_KERNEL);
	if (!mix)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.flags = flags | CLK_GET_RATE_NOCACHE;
	init.parent_names = parent_names;
	init.num_parents = num_parents;
	init.ops = &mmp_clk_mix_ops;

	memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
	if (config->table) {
		table_bytes = sizeof(*config->table) * config->table_size;
		mix->table = kmemdup(config->table, table_bytes, GFP_KERNEL);
		if (!mix->table)
			goto free_mix;

		mix->table_size = config->table_size;
	}

	if (config->mux_table) {
		table_bytes = sizeof(u32) * num_parents;
		mix->mux_table = kmemdup(config->mux_table, table_bytes,
					 GFP_KERNEL);
		if (!mix->mux_table) {
			kfree(mix->table);
			goto free_mix;
		}
	}

	mix->div_flags = config->div_flags;
	mix->mux_flags = config->mux_flags;
	mix->lock = lock;
	mix->hw.init = &init;

	if (config->reg_info.bit_fc >= 32)
		mix->type = MMP_CLK_MIX_TYPE_V1;
	else if (config->reg_info.reg_clk_sel)
		mix->type = MMP_CLK_MIX_TYPE_V3;
	else
		mix->type = MMP_CLK_MIX_TYPE_V2;
	clk = clk_register(dev, &mix->hw);

	if (IS_ERR(clk)) {
		kfree(mix->mux_table);
		kfree(mix->table);
		kfree(mix);
	}

	return clk;

free_mix:
	kfree(mix);
	return ERR_PTR(-ENOMEM);
}