xref: /openbmc/linux/drivers/clk/mvebu/clk-corediv.c (revision b34e08d5)
1 /*
2  * MVEBU Core divider clock
3  *
4  * Copyright (C) 2013 Marvell
5  *
6  * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
7  *
8  * This file is licensed under the terms of the GNU General Public
9  * License version 2.  This program is licensed "as is" without any
10  * warranty of any kind, whether express or implied.
11  */
12 
13 #include <linux/kernel.h>
14 #include <linux/clk-provider.h>
15 #include <linux/of_address.h>
16 #include <linux/slab.h>
17 #include <linux/delay.h>
18 #include "common.h"
19 
/*
 * Mask covering all per-clock ratio-request bits in the base control
 * register; cleared in one go at the end of a set_rate sequence.
 */
#define CORE_CLK_DIV_RATIO_MASK		0xff
21 
22 /*
23  * This structure describes the hardware details (bit offset and mask)
24  * to configure one particular core divider clock. Those hardware
25  * details may differ from one SoC to another. This structure is
26  * therefore typically instantiated statically to describe the
27  * hardware details.
28  */
struct clk_corediv_desc {
	unsigned int mask;	/* mask of the divider field in the ratio register */
	unsigned int offset;	/* bit offset of the divider field in the ratio register */
	unsigned int fieldbit;	/* per-clock bit index, shifted for enable/reload requests */
};
34 
35 /*
36  * This structure describes the hardware details to configure the core
37  * divider clocks on a given SoC. Amongst others, it points to the
38  * array of core divider clock descriptors for this SoC, as well as
39  * the corresponding operations to manipulate them.
40  */
struct clk_corediv_soc_desc {
	const struct clk_corediv_desc *descs;	/* per-clock hardware descriptors */
	unsigned int ndescs;			/* number of entries in @descs */
	const struct clk_ops ops;		/* clk_ops shared by all corediv clocks of this SoC */
	u32 ratio_reload;			/* bit in the base register that triggers a ratio reload */
	u32 enable_bit_offset;			/* shift applied to BIT(fieldbit) for the enable bits */
	u32 ratio_offset;			/* byte offset of the ratio register from the base */
};
49 
50 /*
51  * This structure represents one core divider clock for the clock
52  * framework, and is dynamically allocated for each core divider clock
53  * existing in the current SoC.
54  */
struct clk_corediv {
	struct clk_hw hw;				/* clock framework handle; use to_corediv_clk() to get back here */
	void __iomem *reg;				/* base of the mapped control registers */
	const struct clk_corediv_desc *desc;		/* hardware details for this particular clock */
	const struct clk_corediv_soc_desc *soc_desc;	/* SoC-wide details (offsets, ops) */
	spinlock_t lock;				/* serializes read-modify-write of the registers */
};
62 
/*
 * Cell data handed to of_clk_add_provider().
 * NOTE(review): a single static instance is shared by every call to
 * mvebu_corediv_clk_init() — presumably only one corediv node exists
 * per SoC; a second node would overwrite this. Confirm.
 */
static struct clk_onecell_data clk_data;
64 
65 /*
66  * Description of the core divider clocks available. For now, we
67  * support only NAND, and it is available at the same register
68  * locations regardless of the SoC.
69  */
static const struct clk_corediv_desc mvebu_corediv_desc[] = {
	/* NAND clock: 6-bit divider at bits [13:8] of the ratio register */
	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
};
73 
/* Convert a clk_hw pointer back to its containing struct clk_corediv. */
#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
75 
76 static int clk_corediv_is_enabled(struct clk_hw *hwclk)
77 {
78 	struct clk_corediv *corediv = to_corediv_clk(hwclk);
79 	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
80 	const struct clk_corediv_desc *desc = corediv->desc;
81 	u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset;
82 
83 	return !!(readl(corediv->reg) & enable_mask);
84 }
85 
86 static int clk_corediv_enable(struct clk_hw *hwclk)
87 {
88 	struct clk_corediv *corediv = to_corediv_clk(hwclk);
89 	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
90 	const struct clk_corediv_desc *desc = corediv->desc;
91 	unsigned long flags = 0;
92 	u32 reg;
93 
94 	spin_lock_irqsave(&corediv->lock, flags);
95 
96 	reg = readl(corediv->reg);
97 	reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
98 	writel(reg, corediv->reg);
99 
100 	spin_unlock_irqrestore(&corediv->lock, flags);
101 
102 	return 0;
103 }
104 
105 static void clk_corediv_disable(struct clk_hw *hwclk)
106 {
107 	struct clk_corediv *corediv = to_corediv_clk(hwclk);
108 	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
109 	const struct clk_corediv_desc *desc = corediv->desc;
110 	unsigned long flags = 0;
111 	u32 reg;
112 
113 	spin_lock_irqsave(&corediv->lock, flags);
114 
115 	reg = readl(corediv->reg);
116 	reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset);
117 	writel(reg, corediv->reg);
118 
119 	spin_unlock_irqrestore(&corediv->lock, flags);
120 }
121 
122 static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
123 					 unsigned long parent_rate)
124 {
125 	struct clk_corediv *corediv = to_corediv_clk(hwclk);
126 	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
127 	const struct clk_corediv_desc *desc = corediv->desc;
128 	u32 reg, div;
129 
130 	reg = readl(corediv->reg + soc_desc->ratio_offset);
131 	div = (reg >> desc->offset) & desc->mask;
132 	return parent_rate / div;
133 }
134 
135 static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
136 			       unsigned long *parent_rate)
137 {
138 	/* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */
139 	u32 div;
140 
141 	div = *parent_rate / rate;
142 	if (div < 4)
143 		div = 4;
144 	else if (div > 6)
145 		div = 8;
146 
147 	return *parent_rate / div;
148 }
149 
/*
 * Program a new divider and run the hardware reload sequence:
 * write the divider, flag this clock's ratio-request bit, pulse the
 * reload trigger, wait for the clock to settle, then clear all
 * request bits. The register write order and delays are part of the
 * hardware protocol — do not reorder.
 *
 * NOTE(review): div == 0 (rate > parent_rate) is not guarded here;
 * this relies on the framework having gone through round_rate first.
 */
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_corediv *corediv = to_corediv_clk(hwclk);
	const struct clk_corediv_soc_desc *soc_desc = corediv->soc_desc;
	const struct clk_corediv_desc *desc = corediv->desc;
	unsigned long flags = 0;
	u32 reg, div;

	div = parent_rate / rate;

	spin_lock_irqsave(&corediv->lock, flags);

	/* Write new divider to the divider ratio register */
	reg = readl(corediv->reg + soc_desc->ratio_offset);
	reg &= ~(desc->mask << desc->offset);
	reg |= (div & desc->mask) << desc->offset;
	writel(reg, corediv->reg + soc_desc->ratio_offset);

	/* Set reload-force for this clock */
	reg = readl(corediv->reg) | BIT(desc->fieldbit);
	writel(reg, corediv->reg);

	/* Now trigger the clock update */
	reg = readl(corediv->reg) | soc_desc->ratio_reload;
	writel(reg, corediv->reg);

	/*
	 * Wait for clocks to settle down, and then clear all the
	 * ratios request and the reload request.
	 */
	udelay(1000);
	reg &= ~(CORE_CLK_DIV_RATIO_MASK | soc_desc->ratio_reload);
	writel(reg, corediv->reg);
	udelay(1000);

	spin_unlock_irqrestore(&corediv->lock, flags);

	return 0;
}
190 
/* Armada 370: enable bits at bit 24, ratio register at base + 0x8. */
static const struct clk_corediv_soc_desc armada370_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 24,
	.ratio_offset = 0x8,
};
206 
/* Armada 38x: enable bits at bit 16, ratio register at base + 0x4. */
static const struct clk_corediv_soc_desc armada380_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.enable = clk_corediv_enable,
		.disable = clk_corediv_disable,
		.is_enabled = clk_corediv_is_enabled,
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.enable_bit_offset = 16,
	.ratio_offset = 0x4,
};
222 
/*
 * Armada 375: ratio register at base + 0x4.
 * NOTE(review): no enable/disable/is_enabled ops — presumably the
 * corediv clock cannot be gated on this SoC; confirm against the
 * Armada 375 functional spec.
 */
static const struct clk_corediv_soc_desc armada375_corediv_soc = {
	.descs = mvebu_corediv_desc,
	.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
	.ops = {
		.recalc_rate = clk_corediv_recalc_rate,
		.round_rate = clk_corediv_round_rate,
		.set_rate = clk_corediv_set_rate,
	},
	.ratio_reload = BIT(8),
	.ratio_offset = 0x4,
};
234 
235 static void __init
236 mvebu_corediv_clk_init(struct device_node *node,
237 		       const struct clk_corediv_soc_desc *soc_desc)
238 {
239 	struct clk_init_data init;
240 	struct clk_corediv *corediv;
241 	struct clk **clks;
242 	void __iomem *base;
243 	const char *parent_name;
244 	const char *clk_name;
245 	int i;
246 
247 	base = of_iomap(node, 0);
248 	if (WARN_ON(!base))
249 		return;
250 
251 	parent_name = of_clk_get_parent_name(node, 0);
252 
253 	clk_data.clk_num = soc_desc->ndescs;
254 
255 	/* clks holds the clock array */
256 	clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
257 				GFP_KERNEL);
258 	if (WARN_ON(!clks))
259 		goto err_unmap;
260 	/* corediv holds the clock specific array */
261 	corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
262 				GFP_KERNEL);
263 	if (WARN_ON(!corediv))
264 		goto err_free_clks;
265 
266 	spin_lock_init(&corediv->lock);
267 
268 	for (i = 0; i < clk_data.clk_num; i++) {
269 		of_property_read_string_index(node, "clock-output-names",
270 					      i, &clk_name);
271 		init.num_parents = 1;
272 		init.parent_names = &parent_name;
273 		init.name = clk_name;
274 		init.ops = &soc_desc->ops;
275 		init.flags = 0;
276 
277 		corediv[i].soc_desc = soc_desc;
278 		corediv[i].desc = soc_desc->descs + i;
279 		corediv[i].reg = base;
280 		corediv[i].hw.init = &init;
281 
282 		clks[i] = clk_register(NULL, &corediv[i].hw);
283 		WARN_ON(IS_ERR(clks[i]));
284 	}
285 
286 	clk_data.clks = clks;
287 	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
288 	return;
289 
290 err_free_clks:
291 	kfree(clks);
292 err_unmap:
293 	iounmap(base);
294 }
295 
296 static void __init armada370_corediv_clk_init(struct device_node *node)
297 {
298 	return mvebu_corediv_clk_init(node, &armada370_corediv_soc);
299 }
300 CLK_OF_DECLARE(armada370_corediv_clk, "marvell,armada-370-corediv-clock",
301 	       armada370_corediv_clk_init);
302 
303 static void __init armada375_corediv_clk_init(struct device_node *node)
304 {
305 	return mvebu_corediv_clk_init(node, &armada375_corediv_soc);
306 }
307 CLK_OF_DECLARE(armada375_corediv_clk, "marvell,armada-375-corediv-clock",
308 	       armada375_corediv_clk_init);
309 
310 static void __init armada380_corediv_clk_init(struct device_node *node)
311 {
312 	return mvebu_corediv_clk_init(node, &armada380_corediv_soc);
313 }
314 CLK_OF_DECLARE(armada380_corediv_clk, "marvell,armada-380-corediv-clock",
315 	       armada380_corediv_clk_init);
316