// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, Armada 370, Armada XP
 * and Armada 375 SoCs. Following the DT convention, the function is
 * named after the first known SoC it is compatible with.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	/* Unmap before bailing out: "out" only drops the node reference */
	if ((high_bound - low_bound) <= 0) {
		iounmap(sscg_map);
		goto out;
	}
	/*
	 * A Marvell engineer provided the following formula (the
	 * datasheet was erroneous when this code was written):
	 * Spread percentage = 1/96 * (H - L) / H
	 * H = SSCG_High_Boundary
	 * L = SSCG_Low_Boundary
	 *
	 * Since the deviation is half of the spread, this leads to
	 * the formula used below.
	 *
	 * A 64-bit integer is used to avoid an overflow without
	 * losing any significant digits.
	 */

	freq_swing_half = (((u64)high_bound - (u64)low_bound)
			* (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));
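
	/*
	 * Worked example with purely illustrative register values (not
	 * taken from any datasheet): for sscg_reg = 0x0001969a we get
	 * mode = 0x1 (spread up), low_bound = 0x96 (150) and
	 * high_bound = 0x9a (154). With system_clk = 2000000000 Hz:
	 *   freq_swing_half = (154 - 150) * 2000000000 / (2 * 96 * 154)
	 *                   = 8000000000 / 29568 = 270562 Hz (truncated)
	 * so in spread-up mode the reported clock is raised by ~270 kHz.
	 */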

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

	iounmap(sscg_map);

out:
	of_node_put(sscg_np);

	return system_clk;
}

void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;

	/* One more clock for the optional refclk */
	if (desc->get_refclk_freq)
		clk_data.clk_num += 1;

	clk_data.clks = kcalloc(clk_data.clk_num, sizeof(*clk_data.clks),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}

	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
		&& desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL, 0,
						   rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2+n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
				       cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2+n]));
	}

	/* Register optional refclk */
	if (desc->get_refclk_freq) {
		const char *name = "refclk";
		of_property_read_string_index(np, "clock-output-names",
					      2 + desc->num_ratios, &name);
		rate = desc->get_refclk_freq(base);
		clk_data.clks[2 + desc->num_ratios] =
			clk_register_fixed_rate(NULL, name, NULL, 0, rate);
		WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}
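
/*
 * Usage sketch (illustrative only, not part of this file): a SoC-specific
 * clock driver fills in a coreclk_soc_desc (see common.h) and hands it to
 * mvebu_coreclk_setup() from an early OF clock hook. All "foo" names below
 * are hypothetical:
 *
 *	static const struct coreclk_ratio foo_coreclk_ratios[] __initconst = {
 *		{ .id = 0, .name = "l2clk" },
 *		{ .id = 1, .name = "ddrclk" },
 *	};
 *
 *	static const struct coreclk_soc_desc foo_coreclks = {
 *		.get_tclk_freq = foo_get_tclk_freq,
 *		.get_cpu_freq = foo_get_cpu_freq,
 *		.get_clk_ratio = foo_get_clk_ratio,
 *		.is_sscg_enabled = foo_is_sscg_enabled,
 *		.fix_sscg_deviation = kirkwood_fix_sscg_deviation,
 *		.ratios = foo_coreclk_ratios,
 *		.num_ratios = ARRAY_SIZE(foo_coreclk_ratios),
 *	};
 *
 *	static void __init foo_coreclk_init(struct device_node *np)
 *	{
 *		mvebu_coreclk_setup(np, &foo_coreclks);
 *	}
 *	CLK_OF_DECLARE(foo_coreclk, "marvell,foo-core-clock", foo_coreclk_init);
 *
 * Consumers then reference the clocks by index through the onecell
 * provider, e.g. clocks = <&coreclk 1> for the CPU clock.
 */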

/*
 * Clock Gating Control
 */

DEFINE_SPINLOCK(ctrl_gating_lock);

struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
	void __iomem *base;
	u32 saved_reg;
};

static struct clk_gating_ctrl *ctrl;

static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));
		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}
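
/*
 * Note on the lookup above: the single cell in a consumer's "clocks"
 * specifier is interpreted as the gate's bit index in the gating
 * register, not as an array index. A hypothetical DT consumer
 * (illustrative only) would look like:
 *
 *	usb@50000 {
 *		...
 *		clocks = <&gateclk 18>;
 *	};
 *
 * which resolves to whichever registered gate has bit_idx == 18.
 */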

static int mvebu_clk_gating_suspend(void)
{
	ctrl->saved_reg = readl(ctrl->base);
	return 0;
}

static void mvebu_clk_gating_resume(void)
{
	writel(ctrl->saved_reg, ctrl->base);
}

static struct syscore_ops clk_gate_syscore_ops = {
	.suspend = mvebu_clk_gating_suspend,
	.resume = mvebu_clk_gating_resume,
};

void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gateable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kcalloc(ctrl->num_gates, sizeof(*ctrl->gates),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;
gates_out:
	kfree(ctrl);
ctrl_out:
	iounmap(base);
}
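
/*
 * Usage sketch (illustrative only, not part of this file): a SoC clock
 * driver describes its gates as a clk_gating_soc_desc table terminated
 * by an entry with a NULL name (see common.h) and passes it here. The
 * "foo" names and bit indices below are made up:
 *
 *	static const struct clk_gating_soc_desc foo_gating_desc[] __initconst = {
 *		{ "ge0", NULL, 3, 0 },
 *		{ "pex0", NULL, 5, 0 },
 *		{ "usb0", NULL, 18, 0 },
 *		{ }
 *	};
 *
 *	static void __init foo_clk_gating_init(struct device_node *np)
 *	{
 *		mvebu_clk_gating_setup(np, foo_gating_desc);
 *	}
 *	CLK_OF_DECLARE(foo_clk_gating, "marvell,foo-gating-clock",
 *		       foo_clk_gating_init);
 *
 * The register_syscore_ops() call above ensures the gating register is
 * saved on suspend and restored on resume.
 */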
295