xref: /openbmc/linux/drivers/clk/qcom/clk-rcg2.c (revision 9fb29c73)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/bitops.h>
8 #include <linux/err.h>
9 #include <linux/bug.h>
10 #include <linux/export.h>
11 #include <linux/clk-provider.h>
12 #include <linux/delay.h>
13 #include <linux/regmap.h>
14 #include <linux/math64.h>
15 #include <linux/slab.h>
16 
17 #include <asm/div64.h>
18 
19 #include "clk-rcg.h"
20 #include "common.h"
21 
/* CMD register: update/enable control plus dirty/status bits */
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

/* CFG register: source select, half-integer pre-divider and mode fields */
#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

/* M/N/D counter registers (see __clk_rcg2_configure() for their encoding) */
#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

/* Rounding policy when searching the frequency table */
enum freq_policy {
	FLOOR,	/* pick the largest table frequency <= requested rate */
	CEIL,	/* pick the smallest table frequency >= requested rate */
};
56 
57 static int clk_rcg2_is_enabled(struct clk_hw *hw)
58 {
59 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
60 	u32 cmd;
61 	int ret;
62 
63 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
64 	if (ret)
65 		return ret;
66 
67 	return (cmd & CMD_ROOT_OFF) == 0;
68 }
69 
70 static u8 clk_rcg2_get_parent(struct clk_hw *hw)
71 {
72 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
73 	int num_parents = clk_hw_get_num_parents(hw);
74 	u32 cfg;
75 	int i, ret;
76 
77 	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
78 	if (ret)
79 		goto err;
80 
81 	cfg &= CFG_SRC_SEL_MASK;
82 	cfg >>= CFG_SRC_SEL_SHIFT;
83 
84 	for (i = 0; i < num_parents; i++)
85 		if (cfg == rcg->parent_map[i].cfg)
86 			return i;
87 
88 err:
89 	pr_debug("%s: Clock %s has invalid parent, using default.\n",
90 		 __func__, clk_hw_get_name(hw));
91 	return 0;
92 }
93 
/*
 * Latch a previously written configuration by setting CMD_UPDATE and
 * polling until the hardware clears the bit again (up to ~500us).
 *
 * Returns a regmap error code on register access failure.  Note that on
 * timeout a WARN is emitted but 0 is still returned, so callers proceed
 * as if the update eventually takes effect.
 */
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		/* Hardware clears CMD_UPDATE once the new config is latched */
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return 0;
}
119 
120 static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
121 {
122 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
123 	int ret;
124 	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
125 
126 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
127 				 CFG_SRC_SEL_MASK, cfg);
128 	if (ret)
129 		return ret;
130 
131 	return update_config(rcg);
132 }
133 
134 /*
135  * Calculate m/n:d rate
136  *
137  *          parent_rate     m
138  *   rate = ----------- x  ---
139  *            hid_div       n
140  */
141 static unsigned long
142 calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
143 {
144 	if (hid_div) {
145 		rate *= 2;
146 		rate /= hid_div + 1;
147 	}
148 
149 	if (mode) {
150 		u64 tmp = rate;
151 		tmp *= m;
152 		do_div(tmp, n);
153 		rate = tmp;
154 	}
155 
156 	return rate;
157 }
158 
/*
 * Derive the current output rate from the CFG register and, when the RCG
 * has an M/N counter, the M and N registers.
 */
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + M_REG, &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + N_REG, &n);
		/* N register holds ~(n - m); undo the complement to get n */
		n =  ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	/* Extract the half-integer pre-divider from CFG */
	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
185 
186 static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
187 				    struct clk_rate_request *req,
188 				    enum freq_policy policy)
189 {
190 	unsigned long clk_flags, rate = req->rate;
191 	struct clk_hw *p;
192 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
193 	int index;
194 
195 	switch (policy) {
196 	case FLOOR:
197 		f = qcom_find_freq_floor(f, rate);
198 		break;
199 	case CEIL:
200 		f = qcom_find_freq(f, rate);
201 		break;
202 	default:
203 		return -EINVAL;
204 	};
205 
206 	if (!f)
207 		return -EINVAL;
208 
209 	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
210 	if (index < 0)
211 		return index;
212 
213 	clk_flags = clk_hw_get_flags(hw);
214 	p = clk_hw_get_parent_by_index(hw, index);
215 	if (clk_flags & CLK_SET_RATE_PARENT) {
216 		rate = f->freq;
217 		if (f->pre_div) {
218 			rate /= 2;
219 			rate *= f->pre_div + 1;
220 		}
221 
222 		if (f->n) {
223 			u64 tmp = rate;
224 			tmp = tmp * f->n;
225 			do_div(tmp, f->m);
226 			rate = tmp;
227 		}
228 	} else {
229 		rate =  clk_hw_get_rate(p);
230 	}
231 	req->best_parent_hw = p;
232 	req->best_parent_rate = rate;
233 	req->rate = f->freq;
234 
235 	return 0;
236 }
237 
238 static int clk_rcg2_determine_rate(struct clk_hw *hw,
239 				   struct clk_rate_request *req)
240 {
241 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
242 
243 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
244 }
245 
246 static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
247 					 struct clk_rate_request *req)
248 {
249 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
250 
251 	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
252 }
253 
/*
 * Write the M/N/D and CFG registers for frequency-table entry @f without
 * latching the change (callers follow up with update_config()).
 *
 * Register encoding: M holds m, N holds ~(n - m), D holds ~n; dual-edge
 * mode is selected whenever an m/n ratio other than 1:1 is in use.
 */
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + M_REG, mask, f->m);
		if (ret)
			return ret;

		/* N register takes the complement of (n - m) */
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + N_REG, mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* D register takes the complement of n */
		ret = regmap_update_bits(rcg->clkr.regmap,
				rcg->cmd_rcgr + D_REG, mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
					mask, cfg);
}
291 
/* Program frequency-table entry @f and latch it with an update cycle. */
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret = __clk_rcg2_configure(rcg, f);

	return ret ? ret : update_config(rcg);
}
302 
303 static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
304 			       enum freq_policy policy)
305 {
306 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
307 	const struct freq_tbl *f;
308 
309 	switch (policy) {
310 	case FLOOR:
311 		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
312 		break;
313 	case CEIL:
314 		f = qcom_find_freq(rcg->freq_tbl, rate);
315 		break;
316 	default:
317 		return -EINVAL;
318 	};
319 
320 	if (!f)
321 		return -EINVAL;
322 
323 	return clk_rcg2_configure(rcg, f);
324 }
325 
326 static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
327 			    unsigned long parent_rate)
328 {
329 	return __clk_rcg2_set_rate(hw, rate, CEIL);
330 }
331 
332 static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
333 				   unsigned long parent_rate)
334 {
335 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
336 }
337 
338 static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
339 		unsigned long rate, unsigned long parent_rate, u8 index)
340 {
341 	return __clk_rcg2_set_rate(hw, rate, CEIL);
342 }
343 
344 static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
345 		unsigned long rate, unsigned long parent_rate, u8 index)
346 {
347 	return __clk_rcg2_set_rate(hw, rate, FLOOR);
348 }
349 
/* Standard RCG2 clock ops: rate requests round up to the next table entry */
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
360 
/* RCG2 clock ops variant that rounds rate requests down (floor) */
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
371 
/* One m/n fraction used by the pixel/eDP clocks; a zeroed entry ends a table */
struct frac_entry {
	int num;	/* numerator (m) */
	int den;	/* denominator (n) */
};
376 
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },		/* sentinel */
};
387 
388 static struct frac_entry frac_table_810m[] = { /* Link rate of 162M */
389 	{ 31, 211 },	/* 119 M */
390 	{ 32, 199 },	/* 130.25 M */
391 	{ 63, 307 },	/* 138.50 M */
392 	{ 11, 60 },	/* 148.50 M */
393 	{ 50, 263 },	/* 154 M */
394 	{ 31, 120 },	/* 205.25 M */
395 	{ 119, 359 },	/* 268.50 M */
396 	{ },
397 };
398 
/*
 * Set an eDP pixel rate by scanning the m/n table that matches the parent
 * PLL rate (810 MHz table vs. 675 MHz table) for an entry whose implied
 * source rate is within +/-100 kHz of the actual parent rate.  The current
 * pre-divider in CFG is preserved; only m/n come from the table entry.
 *
 * Returns -EINVAL if no fraction matches.
 */
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need for @rate */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		/* Keep the currently programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
437 
438 static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
439 		unsigned long rate, unsigned long parent_rate, u8 index)
440 {
441 	/* Parent index is set statically in frequency table */
442 	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
443 }
444 
/*
 * determine_rate for eDP pixel clocks: force the parent given by the first
 * frequency-table entry, then find an m/n fraction whose implied source
 * rate is within +/-100 kHz of that parent's rate and report the exact
 * achievable rate.
 *
 * NOTE(review): qcom_find_src_index() can return a negative index which is
 * passed straight to clk_hw_get_parent_by_index() — presumably the freq
 * table always names a mapped source here; confirm against users.
 */
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		/* Source rate this fraction would need for req->rate */
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		/* Use the currently programmed pre-divider for the estimate */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
487 
/* Clock ops for eDP pixel clocks using fractional m/n tables */
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
498 
499 static int clk_byte_determine_rate(struct clk_hw *hw,
500 				   struct clk_rate_request *req)
501 {
502 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
503 	const struct freq_tbl *f = rcg->freq_tbl;
504 	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
505 	unsigned long parent_rate, div;
506 	u32 mask = BIT(rcg->hid_width) - 1;
507 	struct clk_hw *p;
508 
509 	if (req->rate == 0)
510 		return -EINVAL;
511 
512 	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
513 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);
514 
515 	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
516 	div = min_t(u32, div, mask);
517 
518 	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
519 
520 	return 0;
521 }
522 
523 static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
524 			 unsigned long parent_rate)
525 {
526 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
527 	struct freq_tbl f = *rcg->freq_tbl;
528 	unsigned long div;
529 	u32 mask = BIT(rcg->hid_width) - 1;
530 
531 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
532 	div = min_t(u32, div, mask);
533 
534 	f.pre_div = div;
535 
536 	return clk_rcg2_configure(rcg, &f);
537 }
538 
539 static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
540 		unsigned long rate, unsigned long parent_rate, u8 index)
541 {
542 	/* Parent index is set statically in frequency table */
543 	return clk_byte_set_rate(hw, rate, parent_rate);
544 }
545 
/* Clock ops for DSI byte clocks with a table-fixed parent */
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
556 
557 static int clk_byte2_determine_rate(struct clk_hw *hw,
558 				    struct clk_rate_request *req)
559 {
560 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
561 	unsigned long parent_rate, div;
562 	u32 mask = BIT(rcg->hid_width) - 1;
563 	struct clk_hw *p;
564 	unsigned long rate = req->rate;
565 
566 	if (rate == 0)
567 		return -EINVAL;
568 
569 	p = req->best_parent_hw;
570 	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
571 
572 	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
573 	div = min_t(u32, div, mask);
574 
575 	req->rate = calc_rate(parent_rate, 0, 0, 0, div);
576 
577 	return 0;
578 }
579 
/*
 * Set a byte2 clock rate: compute the half-integer divider for the given
 * parent rate, read the currently selected source back from CFG, and
 * reprogram the RCG keeping that source.
 *
 * Returns -EINVAL if the programmed source is not in the parent map.
 */
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	/* Preserve the currently selected source */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
608 
609 static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
610 		unsigned long rate, unsigned long parent_rate, u8 index)
611 {
612 	/* Read the hardware to determine parent during set_rate */
613 	return clk_byte2_set_rate(hw, rate, parent_rate);
614 }
615 
/* Clock ops for byte2 clocks; the parent is read back from hardware */
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
626 
/* m/n fractions tried for generic pixel clocks, in preference order */
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ }	/* sentinel */
};
634 
635 static int clk_pixel_determine_rate(struct clk_hw *hw,
636 				    struct clk_rate_request *req)
637 {
638 	unsigned long request, src_rate;
639 	int delta = 100000;
640 	const struct frac_entry *frac = frac_table_pixel;
641 
642 	for (; frac->num; frac++) {
643 		request = (req->rate * frac->den) / frac->num;
644 
645 		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
646 		if ((src_rate < (request - delta)) ||
647 			(src_rate > (request + delta)))
648 			continue;
649 
650 		req->best_parent_rate = src_rate;
651 		req->rate = (src_rate * frac->num) / frac->den;
652 		return 0;
653 	}
654 
655 	return -EINVAL;
656 }
657 
/*
 * Set a pixel clock rate: keep the currently programmed source and
 * pre-divider, and pick the first m/n fraction whose required source rate
 * is within +/-100 kHz of the given parent rate.
 *
 * Returns -EINVAL if no fraction matches.
 */
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	/* Preserve the currently selected source */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		/* Source rate this fraction would need for @rate */
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		/* Keep the currently programmed pre-divider */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
699 
700 static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
701 		unsigned long parent_rate, u8 index)
702 {
703 	return clk_pixel_set_rate(hw, rate, parent_rate);
704 }
705 
/* Clock ops for generic pixel clocks using frac_table_pixel */
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
716 
/*
 * determine_rate for the GFX3D clock.  Special cases: the XO rate maps to
 * the XO parent, and rates at or above PLL9's fixed rate map to PLL9.
 * Otherwise the parent is chosen from p2/p8 so that it differs from the
 * one currently in use — presumably so the now-unused PLL can be
 * reprogrammed while the other carries the clock (ping-pong); confirm
 * against the GPU clock-controller users.
 */
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	/* Cap the request at PLL9's rate */
	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}
765 
766 static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
767 		unsigned long parent_rate, u8 index)
768 {
769 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
770 	u32 cfg;
771 	int ret;
772 
773 	/* Just mux it, we don't use the division or m/n hardware */
774 	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
775 	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
776 	if (ret)
777 		return ret;
778 
779 	return update_config(rcg);
780 }
781 
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * Intentionally a no-op: clk_gfx3d_determine_rate() always selects a
	 * parent different from the current one, so the framework invokes
	 * clk_gfx3d_set_rate_and_parent() instead of this callback.
	 */
	return 0;
}
792 
/* Clock ops for the GFX3D mux-only RCG */
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
803 
804 static int clk_rcg2_set_force_enable(struct clk_hw *hw)
805 {
806 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
807 	const char *name = clk_hw_get_name(hw);
808 	int ret, count;
809 
810 	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
811 				 CMD_ROOT_EN, CMD_ROOT_EN);
812 	if (ret)
813 		return ret;
814 
815 	/* wait for RCG to turn ON */
816 	for (count = 500; count > 0; count--) {
817 		if (clk_rcg2_is_enabled(hw))
818 			return 0;
819 
820 		udelay(1);
821 	}
822 
823 	pr_err("%s: RCG did not turn on\n", name);
824 	return -ETIMEDOUT;
825 }
826 
827 static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
828 {
829 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
830 
831 	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
832 					CMD_ROOT_EN, 0);
833 }
834 
/* Reprogram the RCG with @f while holding it forcibly enabled. */
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(to_clk_rcg2(hw), f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
851 
/*
 * set_rate for shared RCGs.  If the clock is currently disabled, only the
 * CFG/M/N/D registers are written (no update cycle) so the new settings
 * take effect when the clock is next enabled; otherwise the RCG is
 * reprogrammed under a force-enable.
 */
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
871 
872 static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
873 		unsigned long rate, unsigned long parent_rate, u8 index)
874 {
875 	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
876 }
877 
/*
 * Enable a shared RCG: force it on, latch the configuration that
 * clk_rcg2_shared_set_rate() may have written while the clock was
 * disabled, then release the force-enable vote.
 */
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
897 
/*
 * Disable a shared RCG by parking it on its safe source.  The previous
 * CFG value is saved first and written back (unlatched) at the end so the
 * original rate configuration is restored on the next enable.
 */
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
929 
/* Clock ops for RCGs shared with other masters; parked on disable */
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
941 
942 /* Common APIs to be used for DFS based RCGR */
/*
 * Fill one freq_tbl entry @f from the DFS registers of perf level @l:
 * source, pre-divider and (in M/N mode) the m/n counters, then compute
 * the resulting frequency from the matching parent's current rate.
 * If the programmed source is not in the parent map, prate stays 0 and
 * the computed frequency will be 0.
 */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	/* A zero divider field means no pre-division (treated as 1) */
	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		/* N register holds ~(n - m); undo the complement to get n */
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
990 
991 static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
992 {
993 	struct freq_tbl *freq_tbl;
994 	int i;
995 
996 	/* Allocate space for 1 extra since table is NULL terminated */
997 	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
998 	if (!freq_tbl)
999 		return -ENOMEM;
1000 	rcg->freq_tbl = freq_tbl;
1001 
1002 	for (i = 0; i < MAX_PERF_LEVEL; i++)
1003 		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);
1004 
1005 	return 0;
1006 }
1007 
1008 static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
1009 				   struct clk_rate_request *req)
1010 {
1011 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
1012 	int ret;
1013 
1014 	if (!rcg->freq_tbl) {
1015 		ret = clk_rcg2_dfs_populate_freq_table(rcg);
1016 		if (ret) {
1017 			pr_err("Failed to update DFS tables for %s\n",
1018 					clk_hw_get_name(hw));
1019 			return ret;
1020 		}
1021 	}
1022 
1023 	return clk_rcg2_determine_rate(hw, req);
1024 }
1025 
/*
 * recalc_rate for DFS-enabled RCGs: read the active perf level from
 * SE_CMD_DFSR (bits [4:1]) and either return the cached table frequency
 * or decode the level's DFS registers directly.
 */
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	/* A zero divider field means no pre-division (treated as 1) */
	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		/* N register holds ~(n - m); undo the complement to get n */
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
1072 
/* Ops installed by clk_rcg2_enable_dfs() when hardware DFS is active */
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
1079 
/*
 * If hardware DFS is enabled for this RCG, switch its init data over to
 * the DFS ops and mark rates as uncacheable.  No-op when DFS is off.
 *
 * Note: a regmap_read failure is reported as -EINVAL rather than the
 * underlying error code.
 */
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	/* Table is rebuilt lazily in clk_rcg2_dfs_determine_rate() */
	rcg->freq_tbl = NULL;

	pr_debug("DFS registered for clk %s\n", init->name);

	return 0;
}
1108 
1109 int qcom_cc_register_rcg_dfs(struct regmap *regmap,
1110 			     const struct clk_rcg_dfs_data *rcgs, size_t len)
1111 {
1112 	int i, ret;
1113 
1114 	for (i = 0; i < len; i++) {
1115 		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
1116 		if (ret) {
1117 			const char *name = rcgs[i].init->name;
1118 
1119 			pr_err("DFS register failed for clk %s\n", name);
1120 			return ret;
1121 		}
1122 	}
1123 
1124 	return 0;
1125 }
1126 EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1127