// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
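/*
 * Each DFS performance level has its own CFG/M/N register image at the
 * offsets above; the DFSR command register at SE_CMD_DFSR_OFFSET holds
 * the enable bit and, in bits [4:1], the currently selected level (see
 * clk_rcg2_dfs_recalc_rate()).
 */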

enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 *
 * Note that the hid_div argument is the raw CFG divider field, which
 * encodes 2 * divider - 1; the effective (possibly half-integer)
 * divider is therefore (hid_div + 1) / 2.
 */
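/*
 * Worked example (values chosen for illustration only): parent_rate =
 * 810 MHz with hid_div = 3 (divide by (3 + 1) / 2 = 2) and m/n = 11/50
 * in MND mode yields 810 MHz * 2 / 4 * 11 / 50 = 89.1 MHz.
 */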
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div) {
		rate *= 2;
		rate /= hid_div + 1;
	}

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

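/*
 * Select a frequency table entry under the given rounding policy and
 * derive the parent rate the framework must deliver.  With
 * CLK_SET_RATE_PARENT the calc_rate() math is inverted; e.g. (values
 * for illustration only) a 150 MHz target with pre_div encoding 3
 * (divide by 2) and m/n = 1/2 asks the parent for
 * 150 MHz / 2 * (3 + 1) * 2 / 1 = 600 MHz.
 */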
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

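/*
 * Program the CFG/M/N/D registers for the given frequency table entry.
 * Note the MND encoding: M is written directly, N is written as
 * ~(N - M), and D as ~N (nominally not(2 * D) with D = N / 2, i.e. a
 * 50% duty cycle); clk_rcg2_recalc_rate() undoes the same encoding.
 */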
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
				RCG_D_OFFSET(rcg), mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
					mask, cfg);
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

struct frac_entry {
	int num;
	int den;
};

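/*
 * Each entry is an m/n fraction of the fixed eDP link clock; e.g.
 * 675 MHz * 52/295 is approximately the 119 MHz pixel clock listed
 * alongside it.
 */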
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static const struct frac_entry frac_table_810m[] = {	/* link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};

static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

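/*
 * The byte clock only uses the half-integer divider: the CFG divider
 * field encodes 2 * div - 1, so DIV_ROUND_UP(2 * parent_rate, rate) - 1
 * below is the register value of the smallest divider whose output does
 * not exceed the requested rate.
 */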
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			 unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

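/*
 * Pixel clocks are restricted to a small set of m/n ratios of the
 * parent; determine_rate walks this table and asks the parent to round
 * to rate * den / num for each candidate fraction.
 */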
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
				&hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

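/*
 * The GPU RCG ping-pongs between two PLLs (p8 and p2): the next rate is
 * programmed on whichever PLL is not currently selected before switching
 * to it, so the running parent is never reprogrammed.  The fixed-rate p9
 * and XO are used directly when they already match the request.
 */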
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

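/*
 * Shared RCGs are reconfigured with the root force-enabled so the update
 * handshake can complete even while the downstream branch is off, and
 * they are parked on the safe source whenever the clock is disabled (see
 * clk_rcg2_shared_disable()).
 */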
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
					CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * If the clock is disabled, update the CFG, M, N and D registers
	 * but don't hit the update bit of the CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because the required configuration has
	 * already been written in clk_rcg2_shared_set_rate().
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store the current configuration, as switching to the safe source
	 * would clear the SRC and DIV fields of the CFG register.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration, sourced from the safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while the application processor
	 * subsystem is online. Therefore, the RCG can safely switch its
	 * parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to the current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}

const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	/*
	 * The mode field must be read from the unmodified CFG value;
	 * masking cfg with CFG_SRC_SEL_MASK first would always yield
	 * mode == 0 and silently drop the M/N contribution.
	 */
	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for one extra entry since the table is zero-terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
					clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

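/*
 * The currently selected performance level is reported in bits [4:1] of
 * the DFSR command register; once the frequency table has been
 * populated, the rate is simply the table entry for that level.
 */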
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the rate of the current
	 * parent, because we can't do any better at figuring it out when
	 * the table hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents will be
	 * registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rates change when the consumer writes a register in its own
	 * I/O region, so don't cache the rate.
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

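/*
 * Typical use from a clock controller driver's probe path (a sketch;
 * DEFINE_RCG_DFS comes from clk-rcg.h and the RCG name below is
 * hypothetical):
 *
 *	static const struct clk_rcg_dfs_data rcg_dfs_data[] = {
 *		DEFINE_RCG_DFS(qupv3_wrap0_s0_clk_src),
 *	};
 *
 *	ret = qcom_cc_register_rcg_dfs(regmap, rcg_dfs_data,
 *				       ARRAY_SIZE(rcg_dfs_data));
 */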
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
1127