xref: /openbmc/linux/drivers/clk/mvebu/clk-corediv.c (revision 62e59c4e)
// SPDX-License-Identifier: GPL-2.0
/*
 * MVEBU Core divider clock
 *
 * Copyright (C) 2013 Marvell
 *
 * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
 *
 */

#include <linux/kernel.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "common.h"

#define CORE_CLK_DIV_RATIO_MASK		0xff

/*
 * This structure describes the hardware details (bit offset and mask)
 * to configure one particular core divider clock. Those hardware
 * details may differ from one SoC to another. This structure is
 * therefore typically instantiated statically to describe the
 * hardware details.
 */
struct clk_corediv_desc {
	unsigned int mask;
	unsigned int offset;
	unsigned int fieldbit;
};

/*
 * This structure describes the hardware details to configure the core
 * divider clocks on a given SoC. Amongst others, it points to the
 * array of core divider clock descriptors for this SoC, as well as
 * the corresponding operations to manipulate them.
 */
struct clk_corediv_soc_desc {
	const struct clk_corediv_desc *descs;
	unsigned int ndescs;
	const struct clk_ops ops;
	u32 ratio_reload;
	u32 enable_bit_offset;
	u32 ratio_offset;
};

/*
 * This structure represents one core divider clock for the clock
 * framework, and is dynamically allocated for each core divider clock
 * existing in the current SoC.
 */
struct clk_corediv {
	struct clk_hw hw;
	void __iomem *reg;
	const struct clk_corediv_desc *desc;
	const struct clk_corediv_soc_desc *soc_desc;
	spinlock_t lock;
};

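/*
 * Provider data handed to the common clock framework: the array of
 * registered clocks and its size, exposed through of_clk_add_provider()
 * with of_clk_src_onecell_get() at the end of initialization.
 */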
static struct clk_onecell_data clk_data;

/*
 * Description of the core divider clocks available. For now, we
 * support only NAND. Most SoCs expose it with the same register
 * layout (mvebu_corediv_desc); the 98DX3236 uses a different mask,
 * offset and field bit (mv98dx3236_corediv_desc).
 */
static const struct clk_corediv_desc mvebu_corediv_desc[] = {
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};

static const struct clk_corediv_desc mv98dx3236_corediv_desc[] = {
	{ .mask = 0x0f, .offset = 6, .fieldbit = 27 }, /* NAND clock */
};

#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)

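/*
 * Report whether the divider output is gated on. The enable bit for a
 * given clock is desc->fieldbit shifted by the SoC-specific
 * enable_bit_offset in the control register.
 */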
static int clk_corediv_is_enabled(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;

	return !!(readl(corediv->reg) & enable_mask);
}

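/* Gate the clock on by setting its enable bit under the per-clock lock. */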
static int clk_corediv_enable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}

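/* Gate the clock off by clearing its enable bit under the per-clock lock. */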
static void clk_corediv_disable(struct clk_hw *hwclk)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg;

	spin_lock_irqsave(&corediv->lock, flags);

	reg = readl(corediv->reg);
	reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
	writel(reg, corediv->reg);

	spin_unlock_irqrestore(&corediv->lock, flags);
}

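/*
 * Compute the output rate from the divider field currently programmed
 * in the ratio register: rate = parent_rate / divider.
 */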
static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
					     unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	u32 reg, div;

	reg = readl(corediv->reg + soc_desc->ratio_offset);
	div = (reg >> desc->offset) & desc->mask;
	return parent_rate / div;
}

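/*
 * Round the requested rate to one the hardware can produce. Valid
 * divider values are 4, 5, 6 and 8: smaller computed dividers are
 * clamped to 4, and anything above 6 uses 8.
 */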
static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
				   unsigned long *parent_rate)
{
	/* Valid ratios are 1:4, 1:5, 1:6 and 1:8 */
	u32 div;

	div = *parent_rate / rate;
	if (div < 4)
		div = 4;
	else if (div > 6)
		div = 8;

	return *parent_rate / div;
}

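/*
 * Program a new divider: write the divider field in the ratio
 * register, request a ratio reload so the hardware switches to the
 * new value, then clear the request bits once the clock has settled.
 */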
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg, div;

	div = parent_rate / rate;

	spin_lock_irqsave(&corediv->lock, flags);

	/* Write the new divider to the divider ratio register */
	reg = readl(corediv->reg + soc_desc->ratio_offset);
	reg &= ~(desc->mask << desc->offset);
	reg |= (div & desc->mask) << desc->offset;
	writel(reg, corediv->reg + soc_desc->ratio_offset);

	/* Set reload-force for this clock */
	reg = readl(corediv->reg) | BIT(desc->fieldbit);
	writel(reg, corediv->reg);

	/* Now trigger the clock update */
	reg = readl(corediv->reg) | soc_desc->ratio_reload;
	writel(reg, corediv->reg);

	/*
	 * Wait for the clocks to settle down, and then clear all the
	 * ratio requests and the reload request.
	 */
	udelay(1000);
	reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
	writel(reg, corediv->reg);
	udelay(1000);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}

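/*
 * Per-SoC descriptors. They differ in the register offsets of the
 * enable and ratio fields, in the ratio-reload bit, and in whether the
 * gating operations are exposed at all (Armada 375 and 98DX3236 only
 * provide the rate operations).
 */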
static const struct clk_corediv_soc_desc armada370_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 24,
	.ratio_offset = 0x8,
};

static const struct clk_corediv_soc_desc armada380_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 16,
	.ratio_offset = 0x4,
};

static const struct clk_corediv_soc_desc armada375_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};

static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
	.descs = mv98dx3236_corediv_desc,
	.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(10),
	.ratio_offset = 0x8,
};

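/*
 * Common initialization: map the register window, allocate one
 * struct clk_corediv per descriptor, register the clocks and expose
 * them as a one-cell OF clock provider. Clock names are taken from the
 * "clock-output-names" property of the device-tree node.
 */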
static void __init
mvebu_corediv_clk_init(struct device_node *node,
		       const struct clk_corediv_soc_desc *soc_desc)
{
	struct clk_init_data init;
	struct clk_corediv *corediv;
	struct clk **clks;
	void __iomem *base;
	const char *parent_name;
	const char *clk_name;
	int i;

	base = of_iomap(node, 0);
	if (WARN_ON(!base))
		return;

	parent_name = of_clk_get_parent_name(node, 0);

	clk_data.clk_num = soc_desc->ndescs;

	/* clks holds the clock array */
	clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
				GFP_KERNEL);
	if (WARN_ON(!clks))
		goto err_unmap;
	/* corediv holds the clock-specific array */
	corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
				GFP_KERNEL);
	if (WARN_ON(!corediv))
		goto err_free_clks;

	spin_lock_init(&corediv->lock);

	for (i = 0; i < clk_data.clk_num; i++) {
		of_property_read_string_index(node, "clock-output-names",
					      i, &clk_name);
		init.num_parents = 1;
		init.parent_names = &parent_name;
		init.name = clk_name;
		init.ops = &soc_desc->ops;
		init.flags = 0;

		corediv[i].soc_desc = soc_desc;
		corediv[i].desc = soc_desc->descs + i;
		corediv[i].reg = base;
		corediv[i].hw.init = &init;

		clks[i] = clk_register(NULL, &corediv[i].hw);
		WARN_ON(IS_ERR(clks[i]));
	}

	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
	return;

err_free_clks:
	kfree(clks);
err_unmap:
	iounmap(base);
}

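/*
 * The CLK_OF_DECLARE() entries below bind each SoC-specific init
 * function to its device-tree compatible string. As an illustration
 * only (the exact node layout, register offset and parent clock are
 * defined by the binding documentation and the board, not by this
 * driver), an Armada 370 device tree might describe the block as:
 *
 *	corediv_clk: corediv-clocks@18740 {
 *		compatible = "marvell,armada-370-corediv-clock";
 *		reg = <0x18740 0xc>;
 *		#clock-cells = <1>;
 *		clocks = <&pll>;
 *		clock-output-names = "nand";
 *	};
 */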
static void __init armada370_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
}
CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
	       armada370_corediv_clk_init);

static void __init armada375_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
}
CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
	       armada375_corediv_clk_init);

static void __init armada380_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
}
CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
	       armada380_corediv_clk_init);

static void __init mv98dx3236_corediv_clk_init(struct device_node *node)
{
	return mvebu_corediv_clk_init(node, &mv98dx3236_corediv_soc);
}
CLK_OF_DECLARE(mv98dx3236_corediv_clk, "marvell,mv98dx3236-corediv-clock",
	       mv98dx3236_corediv_clk_init);