xref: /openbmc/linux/drivers/clk/sunxi-ng/ccu_nm.c (revision 4419617e)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"

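/*
 * Scratch values used while looking for the best N/M combination:
 * the allowed range for each factor and the pair that was picked.
 */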
struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};

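/*
 * rate = parent * N / M, computed with 64-bit arithmetic so the
 * parent * N product cannot overflow on 32-bit platforms.
 * For example, a 24 MHz parent with N = 99 and M = 2 gives
 * 24000000 * 99 / 2 = 1188 MHz.
 */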
static unsigned long ccu_nm_calc_rate(unsigned long parent,
				      unsigned long n, unsigned long m)
{
	u64 rate = parent;

	rate *= n;
	do_div(rate, m);

	return rate;
}

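/*
 * Exhaustively walk the allowed N and M ranges and keep the
 * combination that gets closest to the requested rate without
 * exceeding it.
 */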
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = ccu_nm_calc_rate(parent,
								  _n, _m);

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;
}

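/* Gating is delegated to the shared ccu_gate helpers. */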
static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

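/*
 * Compute the current rate: fractional mode takes precedence over
 * the N/M factors, sigma-delta modulation overrides the plain N/M
 * formula, and the fixed post-divider, if any, is applied last.
 */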
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

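	/*
	 * Decode the N and M factors; a value of zero (after applying
	 * the offset) is not a valid factor, so treat it as one.
	 */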
	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = ccu_nm_calc_rate(parent_rate, n, m);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

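/*
 * Round the requested rate: clamp it to the PLL limits, prefer a
 * fractional or sigma-delta table entry when one matches exactly,
 * and otherwise fall back to the best reachable N/M combination.
 * The fixed post-divider, if any, is folded back in before returning.
 */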
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nm->fixed_post_div;

	if (rate < nm->min_rate) {
		rate = nm->min_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (nm->max_rate && rate > nm->max_rate) {
		rate = nm->max_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

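	/* No table entry matched: search for the closest N/M factors. */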
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(*parent_rate, rate, &_nm);
	rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

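/*
 * Program the requested rate: either switch to fractional mode
 * (clearing M first, as most SoCs require), or program the N/M
 * factors, optionally taken from the sigma-delta tables, then wait
 * for the PLL to lock.
 */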
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}

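/*
 * clk_ops for the sunxi-ng N/M-style clocks, declared in ccu_nm.h
 * and referenced by the SoC-specific CCU drivers.
 */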
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};