xref: /openbmc/linux/drivers/clk/imx/clk-scu.c (revision 77d8f3068c63ee0983f0b5ba3207d3f7cce11be4)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>

#include "clk-scu.h"

#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

static struct imx_sc_ipc *ccm_ipc_handle;
static struct device_node *pd_np;

struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;
};

/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate for
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol used to set a clock rate
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol used to get a clock rate
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol used to get a clock's parent
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: index of the new parent
 *
 * This structure describes the SCU protocol used to set a clock's parent
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate or ungate
 * @clk: clk type of this resource
 * @enable: whether to enable (ungate) or disable (gate) the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol used to gate or ungate a clock
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);

static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}

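/*
 * imx_clk_scu_init - Prepare the SCU clock infrastructure
 * @np: clock controller device node
 *
 * Obtains the SCU IPC handle and, for the two-cell clock binding,
 * initializes the per-resource clock lists and checks that the SCU
 * power domain driver is already bound. Returns 0 on success, or a
 * negative error code (including -EPROBE_DEFER) otherwise.
 */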
int imx_clk_scu_init(struct device_node *np)
{
	struct platform_device *pd_dev;
	u32 clk_cells;
	int ret, i;

	ret = imx_scu_get_handle(&ccm_ipc_handle);
	if (ret)
		return ret;

	ret = of_property_read_u32(np, "#clock-cells", &clk_cells);
	if (ret)
		return ret;

	if (clk_cells == 2) {
		for (i = 0; i < IMX_SC_R_LAST; i++)
			INIT_LIST_HEAD(&imx_scu_clks[i]);
		/*
		 * Note: the SCU clock driver depends on the SCU power domain
		 * driver being ready first. As there are no power-domains
		 * properties under the scu clock node in the DT, automatic
		 * probe deferral cannot be used, so check it explicitly here.
		 */
		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
		pd_dev = of_find_device_by_node(pd_np);
		if (!pd_dev || !device_is_bound(&pd_dev->dev)) {
			of_node_put(pd_np);
			return -EPROBE_DEFER;
		}
	}

	return 0;
}

/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock rate %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return le32_to_cpu(msg.data.resp.rate);
}

/*
 * clk_scu_round_rate - Round clock rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Returns the requested rate unchanged; any adjustment is left to the
 * SCU firmware when the rate is actually set.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * Assume every requested rate is supported and let the SCU
	 * firmware handle the rest.
	 */
	return rate;
}

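/*
 * clk_scu_atf_set_cpu_rate - Set the rate of a CPU clock via ATF
 * @hw: CPU clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used
 *
 * CPU frequency scaling is handled by the ARM Trusted Firmware, so the
 * request is forwarded through an SMC call instead of the SCU RPC.
 * Only the A35 cluster (cluster 0) is handled here.
 */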
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct arm_smccc_res res;
	unsigned long cluster_id;

	if (clk->rsrc_id == IMX_SC_R_A35)
		cluster_id = 0;
	else
		return -EINVAL;

	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
		      cluster_id, rate, 0, 0, 0, 0, &res);

	return 0;
}

/*
 * clk_scu_set_rate - Set rate for a SCU clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for SCU clocks
 *
 * Sets a clock frequency for a SCU clock. Returns the SCU
 * protocol status.
 */
static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_req_set_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
	hdr->size = 3;

	msg.rate = cpu_to_le32(rate);
	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

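/*
 * clk_scu_get_parent - Get the parent index of a SCU clock
 * @hw: clock to get the parent for
 *
 * Queries the SCU firmware for the currently selected parent of the
 * clock. Returns the parent index, or 0 if the query fails.
 */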
static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return msg.data.resp.parent;
}

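/*
 * clk_scu_set_parent - Set the parent of a SCU clock
 * @hw: clock to set the parent for
 * @index: index of the new parent
 *
 * Asks the SCU firmware to switch the clock to the given parent.
 * Returns the SCU protocol status.
 */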
static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

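/*
 * sc_pm_clock_enable - Gate or ungate a clock through the SCU firmware
 * @ipc: IPC handle (currently unused, the global ccm_ipc_handle is used)
 * @resource: clock resource to gate or ungate
 * @clk: clk type of this resource
 * @enable: true to ungate the clock, false to gate it
 * @autog: whether to enable HW auto gating
 *
 * Returns the SCU protocol status.
 */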
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

/*
 * clk_scu_prepare - Enable a SCU clock
 * @hw: clock to enable
 *
 * Enable the clock at the DSC slice level
 */
static int clk_scu_prepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);

	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				  clk->clk_type, true, false);
}

/*
 * clk_scu_unprepare - Disable a SCU clock
 * @hw: clock to disable
 *
 * Disable the clock at the DSC slice level
 */
static void clk_scu_unprepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	int ret;

	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				 clk->clk_type, false, false);
	if (ret)
		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
			ret);
}

static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

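/*
 * __imx_clk_scu - Register one SCU clock with the common clock framework
 * @name: clock name
 * @parents: array of parent clock names, may be NULL
 * @num_parents: number of parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 *
 * Allocates a struct clk_scu and registers it. The CPU clock (the A35
 * resource) gets the ATF-backed set_rate operation, all others use the
 * plain SCU operations. Returns the clk_hw pointer or an ERR_PTR().
 */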
struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
			     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct clk_init_data init;
	struct clk_scu *clk;
	struct clk_hw *hw;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->rsrc_id = rsrc_id;
	clk->clk_type = clk_type;

	init.name = name;
	if (rsrc_id == IMX_SC_R_A35)
		init.ops = &clk_scu_cpu_ops;
	else
		init.ops = &clk_scu_ops;
	init.parent_names = parents;
	init.num_parents = num_parents;

	/*
	 * Note: on MX8, the clocks are tightly coupled with their power
	 * domain; once the power domain is off, the clock state may be
	 * lost. Mark the clock CLK_GET_RATE_NOCACHE so users read the
	 * real rate from the hardware instead of a possibly stale
	 * cached value.
	 */
	init.flags = CLK_GET_RATE_NOCACHE;
	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(clk);
		hw = ERR_PTR(ret);
	}

	return hw;
}

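/*
 * imx_scu_of_clk_src_get - Translate a DT clock specifier into a clk_hw
 * @clkspec: two-cell clock specifier (resource ID, clock type)
 * @data: pointer to the imx_scu_clks list array
 *
 * of_clk provider callback for the two-cell binding. Returns the
 * matching clk_hw, or ERR_PTR(-ENODEV) if no clock has been registered
 * for this resource/type pair.
 */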
struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
				      void *data)
{
	unsigned int rsrc = clkspec->args[0];
	unsigned int idx = clkspec->args[1];
	struct list_head *scu_clks = data;
	struct imx_scu_clk_node *clk;

	list_for_each_entry(clk, &scu_clks[rsrc], node) {
		if (clk->clk_type == idx)
			return clk->hw;
	}

	return ERR_PTR(-ENODEV);
}

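/*
 * imx_clk_scu_probe - Probe one "imx-scu-clk" platform device
 * @pdev: platform device carrying an imx_scu_clk_node as platform data
 *
 * Registers the clock described by the platform data and adds it to
 * the per-resource clock list used by imx_scu_of_clk_src_get().
 */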
static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;

	hw = __imx_clk_scu(clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}

static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
	},
	.probe = imx_clk_scu_probe,
};
builtin_platform_driver(imx_clk_scu_driver);

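/*
 * imx_clk_scu_attach_pd - Attach a clock device to its SCU power domain
 * @dev: device to attach
 * @rsrc_id: SCU resource ID identifying the power domain
 *
 * Adds the device to the power domain of the given resource, using the
 * "fsl,scu-pd" provider node found in imx_clk_scu_init().
 */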
static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
	struct of_phandle_args genpdspec = {
		.np = pd_np,
		.args_count = 1,
		.args[0] = rsrc_id,
	};

	return of_genpd_add_device(&genpdspec, dev);
}

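/*
 * imx_clk_scu_alloc_dev - Create a platform device for one SCU clock
 * @name: clock name, also used as the device name
 * @parents: array of parent clock names, may be NULL
 * @num_parents: number of parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 *
 * Allocates a platform device bound to the "imx-scu-clk" driver,
 * attaches it to the matching SCU power domain and registers it so
 * the clock itself is created in imx_clk_scu_probe(). For API
 * backwards compatibility, NULL is returned on success and an
 * ERR_PTR() on failure.
 */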
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	pdev->driver_override = "imx-scu-clk";

	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attach the power domain %d\n",
			name, ret);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;
}

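/*
 * imx_clk_scu_unregister - Unregister all SCU clocks
 *
 * Walks every per-resource clock list and unregisters the clk_hw of
 * each registered SCU clock, freeing its bookkeeping node.
 */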
void imx_clk_scu_unregister(void)
{
	struct imx_scu_clk_node *clk, *tmp;
	int i;

	for (i = 0; i < IMX_SC_R_LAST; i++) {
		/* use the _safe variant as entries are freed while iterating */
		list_for_each_entry_safe(clk, tmp, &imx_scu_clks[i], node) {
			clk_hw_unregister(clk->hw);
			kfree(clk);
		}
	}
}
542