xref: /openbmc/linux/drivers/clk/imx/clk-scu.c (revision 00ae4ebc)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright 2018 NXP
4  *   Dong Aisheng <aisheng.dong@nxp.com>
5  */
6 
7 #include <dt-bindings/firmware/imx/rsrc.h>
8 #include <linux/arm-smccc.h>
9 #include <linux/clk-provider.h>
10 #include <linux/err.h>
11 #include <linux/of_platform.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_domain.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/slab.h>
16 
17 #include "clk-scu.h"
18 
19 #define IMX_SIP_CPUFREQ			0xC2000001
20 #define IMX_SIP_SET_CPUFREQ		0x00
21 
22 static struct imx_sc_ipc *ccm_ipc_handle;
23 static struct device_node *pd_np;
24 static struct platform_driver imx_clk_scu_driver;
25 
26 struct imx_scu_clk_node {
27 	const char *name;
28 	u32 rsrc;
29 	u8 clk_type;
30 	const char * const *parents;
31 	int num_parents;
32 
33 	struct clk_hw *hw;
34 	struct list_head node;
35 };
36 
37 struct list_head imx_scu_clks[IMX_SC_R_LAST];
38 
39 /*
40  * struct clk_scu - Description of one SCU clock
41  * @hw: the common clk_hw
42  * @rsrc_id: resource ID of this SCU clock
43  * @clk_type: type of this clock resource
44  */
45 struct clk_scu {
46 	struct clk_hw hw;
47 	u16 rsrc_id;
48 	u8 clk_type;
49 
50 	/* for state save&restore */
51 	bool is_enabled;
52 	u32 rate;
53 };
54 
55 /*
56  * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
57  * @hdr: SCU protocol header
58  * @rate: rate to set
59  * @resource: clock resource to set rate
60  * @clk: clk type of this resource
61  *
62  * This structure describes the SCU protocol of clock rate set
63  */
64 struct imx_sc_msg_req_set_clock_rate {
65 	struct imx_sc_rpc_msg hdr;
66 	__le32 rate;
67 	__le16 resource;
68 	u8 clk;
69 } __packed __aligned(4);
70 
71 struct req_get_clock_rate {
72 	__le16 resource;
73 	u8 clk;
74 } __packed __aligned(4);
75 
76 struct resp_get_clock_rate {
77 	__le32 rate;
78 };
79 
80 /*
81  * struct imx_sc_msg_get_clock_rate - clock get rate protocol
82  * @hdr: SCU protocol header
83  * @req: get rate request protocol
84  * @resp: get rate response protocol
85  *
86  * This structure describes the SCU protocol of clock rate get
87  */
88 struct imx_sc_msg_get_clock_rate {
89 	struct imx_sc_rpc_msg hdr;
90 	union {
91 		struct req_get_clock_rate req;
92 		struct resp_get_clock_rate resp;
93 	} data;
94 };
95 
96 /*
97  * struct imx_sc_msg_get_clock_parent - clock get parent protocol
98  * @hdr: SCU protocol header
99  * @req: get parent request protocol
100  * @resp: get parent response protocol
101  *
102  * This structure describes the SCU protocol of clock get parent
103  */
104 struct imx_sc_msg_get_clock_parent {
105 	struct imx_sc_rpc_msg hdr;
106 	union {
107 		struct req_get_clock_parent {
108 			__le16 resource;
109 			u8 clk;
110 		} __packed __aligned(4) req;
111 		struct resp_get_clock_parent {
112 			u8 parent;
113 		} resp;
114 	} data;
115 };
116 
117 /*
118  * struct imx_sc_msg_set_clock_parent - clock set parent protocol
119  * @hdr: SCU protocol header
120  * @req: set parent request protocol
121  *
122  * This structure describes the SCU protocol of clock set parent
123  */
124 struct imx_sc_msg_set_clock_parent {
125 	struct imx_sc_rpc_msg hdr;
126 	__le16 resource;
127 	u8 clk;
128 	u8 parent;
129 } __packed;
130 
131 /*
132  * struct imx_sc_msg_req_clock_enable - clock gate protocol
133  * @hdr: SCU protocol header
134  * @resource: clock resource to gate
135  * @clk: clk type of this resource
136  * @enable: whether gate off the clock
137  * @autog: HW auto gate enable
138  *
139  * This structure describes the SCU protocol of clock gate
140  */
141 struct imx_sc_msg_req_clock_enable {
142 	struct imx_sc_rpc_msg hdr;
143 	__le16 resource;
144 	u8 clk;
145 	u8 enable;
146 	u8 autog;
147 } __packed __aligned(4);
148 
149 static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
150 {
151 	return container_of(hw, struct clk_scu, hw);
152 }
153 
154 int imx_clk_scu_init(struct device_node *np)
155 {
156 	u32 clk_cells;
157 	int ret, i;
158 
159 	ret = imx_scu_get_handle(&ccm_ipc_handle);
160 	if (ret)
161 		return ret;
162 
163 	of_property_read_u32(np, "#clock-cells", &clk_cells);
164 
165 	if (clk_cells == 2) {
166 		for (i = 0; i < IMX_SC_R_LAST; i++)
167 			INIT_LIST_HEAD(&imx_scu_clks[i]);
168 
169 		/* pd_np will be used to attach power domains later */
170 		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
171 		if (!pd_np)
172 			return -EINVAL;
173 	}
174 
175 	return platform_driver_register(&imx_clk_scu_driver);
176 }
177 
178 /*
179  * clk_scu_recalc_rate - Get clock rate for a SCU clock
180  * @hw: clock to get rate for
181  * @parent_rate: parent rate provided by common clock framework, not used
182  *
183  * Gets the current clock rate of a SCU clock. Returns the current
184  * clock rate, or zero in failure.
185  */
186 static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
187 					 unsigned long parent_rate)
188 {
189 	struct clk_scu *clk = to_clk_scu(hw);
190 	struct imx_sc_msg_get_clock_rate msg;
191 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
192 	int ret;
193 
194 	hdr->ver = IMX_SC_RPC_VERSION;
195 	hdr->svc = IMX_SC_RPC_SVC_PM;
196 	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
197 	hdr->size = 2;
198 
199 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
200 	msg.data.req.clk = clk->clk_type;
201 
202 	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
203 	if (ret) {
204 		pr_err("%s: failed to get clock rate %d\n",
205 		       clk_hw_get_name(hw), ret);
206 		return 0;
207 	}
208 
209 	return le32_to_cpu(msg.data.resp.rate);
210 }
211 
212 /*
213  * clk_scu_round_rate - Round clock rate for a SCU clock
214  * @hw: clock to round rate for
215  * @rate: rate to round
216  * @parent_rate: parent rate provided by common clock framework, not used
217  *
218  * Returns the current clock rate, or zero in failure.
219  */
220 static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
221 			       unsigned long *parent_rate)
222 {
223 	/*
224 	 * Assume we support all the requested rate and let the SCU firmware
225 	 * to handle the left work
226 	 */
227 	return rate;
228 }
229 
230 static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
231 				    unsigned long parent_rate)
232 {
233 	struct clk_scu *clk = to_clk_scu(hw);
234 	struct arm_smccc_res res;
235 	unsigned long cluster_id;
236 
237 	if (clk->rsrc_id == IMX_SC_R_A35)
238 		cluster_id = 0;
239 	else
240 		return -EINVAL;
241 
242 	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
243 	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
244 		      cluster_id, rate, 0, 0, 0, 0, &res);
245 
246 	return 0;
247 }
248 
249 /*
250  * clk_scu_set_rate - Set rate for a SCU clock
251  * @hw: clock to change rate for
252  * @rate: target rate for the clock
253  * @parent_rate: rate of the clock parent, not used for SCU clocks
254  *
255  * Sets a clock frequency for a SCU clock. Returns the SCU
256  * protocol status.
257  */
258 static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
259 			    unsigned long parent_rate)
260 {
261 	struct clk_scu *clk = to_clk_scu(hw);
262 	struct imx_sc_msg_req_set_clock_rate msg;
263 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
264 
265 	hdr->ver = IMX_SC_RPC_VERSION;
266 	hdr->svc = IMX_SC_RPC_SVC_PM;
267 	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
268 	hdr->size = 3;
269 
270 	msg.rate = cpu_to_le32(rate);
271 	msg.resource = cpu_to_le16(clk->rsrc_id);
272 	msg.clk = clk->clk_type;
273 
274 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
275 }
276 
277 static u8 clk_scu_get_parent(struct clk_hw *hw)
278 {
279 	struct clk_scu *clk = to_clk_scu(hw);
280 	struct imx_sc_msg_get_clock_parent msg;
281 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
282 	int ret;
283 
284 	hdr->ver = IMX_SC_RPC_VERSION;
285 	hdr->svc = IMX_SC_RPC_SVC_PM;
286 	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
287 	hdr->size = 2;
288 
289 	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
290 	msg.data.req.clk = clk->clk_type;
291 
292 	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
293 	if (ret) {
294 		pr_err("%s: failed to get clock parent %d\n",
295 		       clk_hw_get_name(hw), ret);
296 		return 0;
297 	}
298 
299 	return msg.data.resp.parent;
300 }
301 
302 static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
303 {
304 	struct clk_scu *clk = to_clk_scu(hw);
305 	struct imx_sc_msg_set_clock_parent msg;
306 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
307 
308 	hdr->ver = IMX_SC_RPC_VERSION;
309 	hdr->svc = IMX_SC_RPC_SVC_PM;
310 	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
311 	hdr->size = 2;
312 
313 	msg.resource = cpu_to_le16(clk->rsrc_id);
314 	msg.clk = clk->clk_type;
315 	msg.parent = index;
316 
317 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
318 }
319 
320 static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
321 			      u8 clk, bool enable, bool autog)
322 {
323 	struct imx_sc_msg_req_clock_enable msg;
324 	struct imx_sc_rpc_msg *hdr = &msg.hdr;
325 
326 	hdr->ver = IMX_SC_RPC_VERSION;
327 	hdr->svc = IMX_SC_RPC_SVC_PM;
328 	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
329 	hdr->size = 3;
330 
331 	msg.resource = cpu_to_le16(resource);
332 	msg.clk = clk;
333 	msg.enable = enable;
334 	msg.autog = autog;
335 
336 	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
337 }
338 
339 /*
340  * clk_scu_prepare - Enable a SCU clock
341  * @hw: clock to enable
342  *
343  * Enable the clock at the DSC slice level
344  */
345 static int clk_scu_prepare(struct clk_hw *hw)
346 {
347 	struct clk_scu *clk = to_clk_scu(hw);
348 
349 	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
350 				  clk->clk_type, true, false);
351 }
352 
353 /*
354  * clk_scu_unprepare - Disable a SCU clock
355  * @hw: clock to enable
356  *
357  * Disable the clock at the DSC slice level
358  */
359 static void clk_scu_unprepare(struct clk_hw *hw)
360 {
361 	struct clk_scu *clk = to_clk_scu(hw);
362 	int ret;
363 
364 	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
365 				 clk->clk_type, false, false);
366 	if (ret)
367 		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
368 			ret);
369 }
370 
371 static const struct clk_ops clk_scu_ops = {
372 	.recalc_rate = clk_scu_recalc_rate,
373 	.round_rate = clk_scu_round_rate,
374 	.set_rate = clk_scu_set_rate,
375 	.get_parent = clk_scu_get_parent,
376 	.set_parent = clk_scu_set_parent,
377 	.prepare = clk_scu_prepare,
378 	.unprepare = clk_scu_unprepare,
379 };
380 
381 static const struct clk_ops clk_scu_cpu_ops = {
382 	.recalc_rate = clk_scu_recalc_rate,
383 	.round_rate = clk_scu_round_rate,
384 	.set_rate = clk_scu_atf_set_cpu_rate,
385 	.prepare = clk_scu_prepare,
386 	.unprepare = clk_scu_unprepare,
387 };
388 
389 struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
390 			     const char * const *parents, int num_parents,
391 			     u32 rsrc_id, u8 clk_type)
392 {
393 	struct clk_init_data init;
394 	struct clk_scu *clk;
395 	struct clk_hw *hw;
396 	int ret;
397 
398 	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
399 	if (!clk)
400 		return ERR_PTR(-ENOMEM);
401 
402 	clk->rsrc_id = rsrc_id;
403 	clk->clk_type = clk_type;
404 
405 	init.name = name;
406 	init.ops = &clk_scu_ops;
407 	if (rsrc_id == IMX_SC_R_A35)
408 		init.ops = &clk_scu_cpu_ops;
409 	else
410 		init.ops = &clk_scu_ops;
411 	init.parent_names = parents;
412 	init.num_parents = num_parents;
413 
414 	/*
415 	 * Note on MX8, the clocks are tightly coupled with power domain
416 	 * that once the power domain is off, the clock status may be
417 	 * lost. So we make it NOCACHE to let user to retrieve the real
418 	 * clock status from HW instead of using the possible invalid
419 	 * cached rate.
420 	 */
421 	init.flags = CLK_GET_RATE_NOCACHE;
422 	clk->hw.init = &init;
423 
424 	hw = &clk->hw;
425 	ret = clk_hw_register(dev, hw);
426 	if (ret) {
427 		kfree(clk);
428 		hw = ERR_PTR(ret);
429 		return hw;
430 	}
431 
432 	if (dev)
433 		dev_set_drvdata(dev, clk);
434 
435 	return hw;
436 }
437 
438 struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
439 				      void *data)
440 {
441 	unsigned int rsrc = clkspec->args[0];
442 	unsigned int idx = clkspec->args[1];
443 	struct list_head *scu_clks = data;
444 	struct imx_scu_clk_node *clk;
445 
446 	list_for_each_entry(clk, &scu_clks[rsrc], node) {
447 		if (clk->clk_type == idx)
448 			return clk->hw;
449 	}
450 
451 	return ERR_PTR(-ENODEV);
452 }
453 
454 static int imx_clk_scu_probe(struct platform_device *pdev)
455 {
456 	struct device *dev = &pdev->dev;
457 	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
458 	struct clk_hw *hw;
459 	int ret;
460 
461 	pm_runtime_set_suspended(dev);
462 	pm_runtime_set_autosuspend_delay(dev, 50);
463 	pm_runtime_use_autosuspend(&pdev->dev);
464 	pm_runtime_enable(dev);
465 
466 	ret = pm_runtime_get_sync(dev);
467 	if (ret) {
468 		pm_runtime_disable(dev);
469 		return ret;
470 	}
471 
472 	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
473 			   clk->rsrc, clk->clk_type);
474 	if (IS_ERR(hw)) {
475 		pm_runtime_disable(dev);
476 		return PTR_ERR(hw);
477 	}
478 
479 	clk->hw = hw;
480 	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
481 
482 	pm_runtime_mark_last_busy(&pdev->dev);
483 	pm_runtime_put_autosuspend(&pdev->dev);
484 
485 	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
486 		clk->clk_type);
487 
488 	return 0;
489 }
490 
491 static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
492 {
493 	struct clk_scu *clk = dev_get_drvdata(dev);
494 
495 	clk->rate = clk_hw_get_rate(&clk->hw);
496 	clk->is_enabled = clk_hw_is_enabled(&clk->hw);
497 
498 	if (clk->rate)
499 		dev_dbg(dev, "save rate %d\n", clk->rate);
500 
501 	if (clk->is_enabled)
502 		dev_dbg(dev, "save enabled state\n");
503 
504 	return 0;
505 }
506 
507 static int __maybe_unused imx_clk_scu_resume(struct device *dev)
508 {
509 	struct clk_scu *clk = dev_get_drvdata(dev);
510 	int ret = 0;
511 
512 	if (clk->rate) {
513 		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
514 		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
515 			!ret ? "success" : "failed");
516 	}
517 
518 	if (clk->is_enabled) {
519 		ret = clk_scu_prepare(&clk->hw);
520 		dev_dbg(dev, "restore enabled state %s\n",
521 			!ret ? "success" : "failed");
522 	}
523 
524 	return ret;
525 }
526 
527 static const struct dev_pm_ops imx_clk_scu_pm_ops = {
528 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
529 				      imx_clk_scu_resume)
530 };
531 
532 static struct platform_driver imx_clk_scu_driver = {
533 	.driver = {
534 		.name = "imx-scu-clk",
535 		.suppress_bind_attrs = true,
536 		.pm = &imx_clk_scu_pm_ops,
537 	},
538 	.probe = imx_clk_scu_probe,
539 };
540 
541 static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
542 {
543 	struct of_phandle_args genpdspec = {
544 		.np = pd_np,
545 		.args_count = 1,
546 		.args[0] = rsrc_id,
547 	};
548 
549 	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
550 	    rsrc_id == IMX_SC_R_A72)
551 		return 0;
552 
553 	return of_genpd_add_device(&genpdspec, dev);
554 }
555 
556 struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
557 				     const char * const *parents,
558 				     int num_parents, u32 rsrc_id, u8 clk_type)
559 {
560 	struct imx_scu_clk_node clk = {
561 		.name = name,
562 		.rsrc = rsrc_id,
563 		.clk_type = clk_type,
564 		.parents = parents,
565 		.num_parents = num_parents,
566 	};
567 	struct platform_device *pdev;
568 	int ret;
569 
570 	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
571 	if (!pdev) {
572 		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
573 		       name, rsrc_id, clk_type);
574 		return ERR_PTR(-ENOMEM);
575 	}
576 
577 	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
578 	if (ret) {
579 		platform_device_put(pdev);
580 		return ERR_PTR(ret);
581 	}
582 
583 	pdev->driver_override = "imx-scu-clk";
584 
585 	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
586 	if (ret)
587 		pr_warn("%s: failed to attached the power domain %d\n",
588 			name, ret);
589 
590 	platform_device_add(pdev);
591 
592 	/* For API backwards compatiblilty, simply return NULL for success */
593 	return NULL;
594 }
595 
596 void imx_clk_scu_unregister(void)
597 {
598 	struct imx_scu_clk_node *clk;
599 	int i;
600 
601 	for (i = 0; i < IMX_SC_R_LAST; i++) {
602 		list_for_each_entry(clk, &imx_scu_clks[i], node) {
603 			clk_hw_unregister(clk->hw);
604 			kfree(clk);
605 		}
606 	}
607 }
608