xref: /openbmc/linux/drivers/clk/imx/clk-scu.c (revision f5cc14e4)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <dt-bindings/firmware/imx/rsrc.h>
#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "clk-scu.h"

#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

static struct imx_sc_ipc *ccm_ipc_handle;
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;

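/*
 * struct imx_scu_clk_node - Platform data for one SCU clock device
 * @name: clock name
 * @rsrc: SCU resource ID
 * @clk_type: SCU clock type of the resource
 * @parents: parent clock names
 * @num_parents: number of parents
 * @hw: the registered clk_hw, filled in at probe time
 * @node: entry in the per-resource imx_scu_clks list
 */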
struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

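/*
 * Lookup table of registered SCU clocks, indexed by resource ID. Each
 * entry is a list of imx_scu_clk_node, one per clock type, filled in by
 * imx_clk_scu_probe() and searched by imx_scu_of_clk_src_get().
 */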
struct list_head imx_scu_clks[IMX_SC_R_LAST];

/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 * @is_enabled: cached enable state, saved for suspend/resume
 * @rate: cached clock rate, saved for suspend/resume
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore */
	bool is_enabled;
	u32 rate;
};

/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol used to set a clock rate.
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

struct resp_get_clock_rate {
	__le32 rate;
};

/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol used to get a clock rate.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol used to get a clock parent.
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: index of the new parent
 *
 * This structure describes the SCU protocol used to set a clock parent.
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether to enable (ungate) the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol used to gate a clock.
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);

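/* Get the struct clk_scu wrapper from its embedded clk_hw */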
static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}

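/*
 * imx_clk_scu_init - Initialize SCU clock support
 * @np: clock controller device node
 *
 * Gets the SCU IPC handle and, when the controller uses two clock cells
 * (resource ID + clock type), initializes the per-resource clock lists
 * and looks up the "fsl,scu-pd" power domain node used later to attach
 * the clock devices. Finally registers the SCU clock platform driver.
 */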
int imx_clk_scu_init(struct device_node *np)
{
	u32 clk_cells = 0;
	int ret, i;

	ret = imx_scu_get_handle(&ccm_ipc_handle);
	if (ret)
		return ret;

	/* clk_cells stays 0 if the property is missing */
	of_property_read_u32(np, "#clock-cells", &clk_cells);

	if (clk_cells == 2) {
		for (i = 0; i < IMX_SC_R_LAST; i++)
			INIT_LIST_HEAD(&imx_scu_clks[i]);

		/* pd_np will be used to attach power domains later */
		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
		if (!pd_np)
			return -EINVAL;
	}

	return platform_driver_register(&imx_clk_scu_driver);
}

/*
 * clk_scu_recalc_rate - Get clock rate for a SCU clock
 * @hw: clock to get rate for
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Gets the current clock rate of a SCU clock. Returns the current
 * clock rate, or zero on failure.
 */
static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock rate %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return le32_to_cpu(msg.data.resp.rate);
}

/*
 * clk_scu_round_rate - Round clock rate for a SCU clock
 * @hw: clock to round rate for
 * @rate: rate to round
 * @parent_rate: parent rate provided by common clock framework, not used
 *
 * Returns the requested rate unchanged; the SCU firmware performs the
 * actual rounding when the rate is set.
 */
static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * Assume every requested rate is supported and let the SCU
	 * firmware handle the rest.
	 */
	return rate;
}

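/*
 * clk_scu_atf_set_cpu_rate - Set a CPU clock rate via ARM Trusted Firmware
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used
 *
 * CPU frequency scaling is requested through an SMC call to the ARM
 * Trusted Firmware instead of the SCU RPC interface.
 */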
static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct arm_smccc_res res;
	unsigned long cluster_id;

	if (clk->rsrc_id == IMX_SC_R_A35)
		cluster_id = 0;
	else
		return -EINVAL;

	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
		      cluster_id, rate, 0, 0, 0, 0, &res);

	return 0;
}

/*
 * clk_scu_set_rate - Set rate for a SCU clock
 * @hw: clock to change rate for
 * @rate: target rate for the clock
 * @parent_rate: rate of the clock parent, not used for SCU clocks
 *
 * Sets a clock frequency for a SCU clock. Returns the SCU
 * protocol status.
 */
static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_req_set_clock_rate msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
	hdr->size = 3;

	msg.rate = cpu_to_le32(rate);
	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

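/*
 * clk_scu_get_parent - Get the current parent of a SCU clock
 * @hw: clock to query
 *
 * Returns the index of the current parent, or 0 on failure.
 */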
static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	return msg.data.resp.parent;
}

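/*
 * clk_scu_set_parent - Set the parent of a SCU clock
 * @hw: clock to reparent
 * @index: index of the new parent
 *
 * Returns the SCU protocol status.
 */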
static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

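/*
 * sc_pm_clock_enable - Gate or ungate a clock through the SCU
 * @ipc: SCU IPC handle (the RPC call uses the global ccm_ipc_handle)
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: true to enable (ungate) the clock, false to gate it
 * @autog: enable HW auto gating
 *
 * Returns the SCU protocol status.
 */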
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
}

/*
 * clk_scu_prepare - Enable a SCU clock
 * @hw: clock to enable
 *
 * Enable the clock at the DSC slice level
 */
static int clk_scu_prepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);

	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				  clk->clk_type, true, false);
}

/*
 * clk_scu_unprepare - Disable a SCU clock
 * @hw: clock to disable
 *
 * Disable the clock at the DSC slice level
 */
static void clk_scu_unprepare(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	int ret;

	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
				 clk->clk_type, false, false);
	if (ret)
		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
			ret);
}

static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

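/*
 * __imx_clk_scu - Register one SCU clock with the common clock framework
 * @dev: clock device, may be NULL for clocks registered at init time
 * @name: clock name
 * @parents: parent clock names
 * @num_parents: number of parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: SCU clock type of the resource
 *
 * Returns the registered clk_hw, or an ERR_PTR() on failure.
 */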
struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
			     const char * const *parents, int num_parents,
			     u32 rsrc_id, u8 clk_type)
{
	struct clk_init_data init;
	struct clk_scu *clk;
	struct clk_hw *hw;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->rsrc_id = rsrc_id;
	clk->clk_type = clk_type;

	init.name = name;
	if (rsrc_id == IMX_SC_R_A35)
		init.ops = &clk_scu_cpu_ops;
	else
		init.ops = &clk_scu_ops;
	init.parent_names = parents;
	init.num_parents = num_parents;

	/*
	 * Note: on MX8, the clocks are tightly coupled with their power
	 * domain; once the power domain is off, the clock state may be
	 * lost. Mark the clock NOCACHE so users read the real clock
	 * state from the hardware instead of a possibly stale cached
	 * rate.
	 */
	init.flags = CLK_GET_RATE_NOCACHE;
	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(clk);
		return ERR_PTR(ret);
	}

	if (dev)
		dev_set_drvdata(dev, clk);

	return hw;
}

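/*
 * imx_scu_of_clk_src_get - Translate a two-cell DT clkspec to a clk_hw
 * @clkspec: phandle arguments; args[0] is the resource ID, args[1] the
 *           clock type
 * @data: the imx_scu_clks lookup table
 *
 * Returns the matching clk_hw, or ERR_PTR(-ENODEV) if no clock has been
 * registered for this resource/type pair.
 */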
struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
				      void *data)
{
	unsigned int rsrc = clkspec->args[0];
	unsigned int idx = clkspec->args[1];
	struct list_head *scu_clks = data;
	struct imx_scu_clk_node *clk;

	list_for_each_entry(clk, &scu_clks[rsrc], node) {
		if (clk->clk_type == idx)
			return clk->hw;
	}

	return ERR_PTR(-ENODEV);
}

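/*
 * imx_clk_scu_probe - Probe one SCU clock platform device
 * @pdev: platform device carrying an imx_scu_clk_node as platform data
 *
 * Enables runtime PM so the attached power domain is powered up while
 * the clock is registered, then adds the clock to the per-resource
 * lookup list used by imx_scu_of_clk_src_get().
 */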
static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;
	int ret;

	pm_runtime_set_suspended(dev);
	pm_runtime_set_autosuspend_delay(dev, 50);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret) {
		pm_runtime_disable(dev);
		return ret;
	}

	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw)) {
		pm_runtime_disable(dev);
		return PTR_ERR(hw);
	}

	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}

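/*
 * System sleep support: the SCU may lose clock state when its power
 * domain is powered down, so save the rate and enable state at suspend
 * time and restore them on resume.
 */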
static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);

	clk->rate = clk_hw_get_rate(&clk->hw);
	clk->is_enabled = clk_hw_is_enabled(&clk->hw);

	if (clk->rate)
		dev_dbg(dev, "save rate %d\n", clk->rate);

	if (clk->is_enabled)
		dev_dbg(dev, "save enabled state\n");

	return 0;
}

static int __maybe_unused imx_clk_scu_resume(struct device *dev)
{
	struct clk_scu *clk = dev_get_drvdata(dev);
	int ret = 0;

	if (clk->rate) {
		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
			!ret ? "success" : "failed");
	}

	if (clk->is_enabled) {
		ret = clk_scu_prepare(&clk->hw);
		dev_dbg(dev, "restore enabled state %s\n",
			!ret ? "success" : "failed");
	}

	return ret;
}

static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};

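/*
 * imx_clk_scu_attach_pd - Attach a SCU clock device to its power domain
 * @dev: clock device to attach
 * @rsrc_id: SCU resource ID of the clock
 *
 * Attaches @dev to the "fsl,scu-pd" power domain matching @rsrc_id.
 * CPU resources (A35/A53/A72) are skipped.
 */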
static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
	struct of_phandle_args genpdspec = {
		.np = pd_np,
		.args_count = 1,
		.args[0] = rsrc_id,
	};

	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
	    rsrc_id == IMX_SC_R_A72)
		return 0;

	return of_genpd_add_device(&genpdspec, dev);
}

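/*
 * imx_clk_scu_alloc_dev - Create a platform device for one SCU clock
 * @name: clock name
 * @parents: parent clock names
 * @num_parents: number of parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: SCU clock type of the resource
 *
 * The clock itself is registered later in imx_clk_scu_probe(), once the
 * device is bound to the "imx-scu-clk" driver and attached to its power
 * domain.
 */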
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	pdev->driver_override = "imx-scu-clk";

	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attach the power domain %d\n",
			name, ret);

	platform_device_add(pdev);

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;
}

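/*
 * imx_clk_scu_unregister - Unregister and free all SCU clocks
 *
 * Walks the per-resource lists and removes every clock registered via
 * imx_clk_scu_alloc_dev()/imx_clk_scu_probe().
 */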
void imx_clk_scu_unregister(void)
{
	struct imx_scu_clk_node *clk, *tmp;
	int i;

	for (i = 0; i < IMX_SC_R_LAST; i++) {
		/* use the _safe variant as entries are freed while iterating */
		list_for_each_entry_safe(clk, tmp, &imx_scu_clks[i], node) {
			clk_hw_unregister(clk->hw);
			kfree(clk);
		}
	}
}
607