xref: /openbmc/linux/drivers/clk/sunxi-ng/ccu_nm.c (revision 6d99a79c)
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"

struct _ccu_nm {
	unsigned long	n, min_n, max_n;
	unsigned long	m, min_m, max_m;
};

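/*
 * Exhaustively walk the allowed N and M ranges and keep the N/M pair
 * whose resulting rate is closest to, but never above, the requested
 * rate. The search space is bounded by the register field widths, so
 * a brute-force scan is acceptable here.
 */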
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long best_rate = 0;
	unsigned long best_n = 0, best_m = 0;
	unsigned long _n, _m;

	for (_n = nm->min_n; _n <= nm->max_n; _n++) {
		for (_m = nm->min_m; _m <= nm->max_m; _m++) {
			unsigned long tmp_rate = parent * _n / _m;

			if (tmp_rate > rate)
				continue;

			if ((rate - tmp_rate) < (rate - best_rate)) {
				best_rate = tmp_rate;
				best_n = _n;
				best_m = _m;
			}
		}
	}

	nm->n = best_n;
	nm->m = best_m;
}

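/*
 * The enable/disable/is_enabled callbacks simply forward to the common
 * gate helpers, using the enable bit mask stored in struct ccu_nm.
 */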
static void ccu_nm_disable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);

	return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}

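/*
 * Compute the current output rate from the hardware state: fractional
 * mode takes precedence, then sigma-delta modulation, and finally the
 * plain parent * N / M formula. A fixed post-divider, if any, is
 * applied last.
 */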
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	unsigned long rate;
	unsigned long n, m;
	u32 reg;

	if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
		rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;

		return rate;
	}

	reg = readl(nm->common.base + nm->common.reg);

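	/*
	 * The register fields store each factor minus its offset; a
	 * factor that decodes to zero is forced to 1 to avoid a
	 * multiplication or division by zero below.
	 */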
	n = reg >> nm->n.shift;
	n &= (1 << nm->n.width) - 1;
	n += nm->n.offset;
	if (!n)
		n++;

	m = reg >> nm->m.shift;
	m &= (1 << nm->m.width) - 1;
	m += nm->m.offset;
	if (!m)
		m++;

	if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
		rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
	else
		rate = parent_rate * n / m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

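/*
 * Round a requested rate to what the divider can actually produce.
 * The request is first scaled by the fixed post-divider (if present),
 * clamped to the clock's rate limits, and matched against the
 * fractional and sigma-delta tables before falling back to the N/M
 * search.
 */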
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long *parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate *= nm->fixed_post_div;

	if (rate < nm->min_rate) {
		rate = nm->min_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (nm->max_rate && rate > nm->max_rate) {
		rate = nm->max_rate;
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
			rate /= nm->fixed_post_div;
		return rate;
	}

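	/* Use the full field range when no explicit limit is set; M starts at 1 */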
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(*parent_rate, rate, &_nm);
	rate = *parent_rate * _nm.n / _nm.m;

	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate /= nm->fixed_post_div;

	return rate;
}

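/*
 * Program the divider for the requested rate, preferring fractional
 * mode, then sigma-delta modulation, and finally plain N/M factors.
 * The lock status bit is polled before returning so callers see a
 * stable clock.
 */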
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	/* Adjust target rate according to post-dividers */
	if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nm->fixed_post_div;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* most SoCs require M to be 0 if fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
		ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

		/* Sigma delta modulation requires specific N and M factors */
		ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
					   &_nm.m, &_nm.n);
	} else {
		ccu_sdm_helper_disable(&nm->common, &nm->sdm);
		ccu_nm_find_best(parent_rate, rate, &_nm);
	}

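	/* Program the selected N and M factors, leaving the other bits intact */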
	spin_lock_irqsave(nm->common.lock, flags);

	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}

const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};
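
/*
 * Usage sketch (not part of the original file): CCU drivers are
 * expected to reference ccu_nm_ops from the clk_init_data of a
 * struct ccu_nm instance, typically through the SUNXI_CCU_NM_WITH_*()
 * helper macros declared in ccu_nm.h; the exact macro arguments live
 * there and are not reproduced here. The common clock framework then
 * drives the callbacks above, rounding a clk_set_rate() request via
 * ccu_nm_round_rate() before programming it with ccu_nm_set_rate().
 */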