xref: /openbmc/linux/drivers/clk/imx/clk-scu.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
1  // SPDX-License-Identifier: GPL-2.0+
2  /*
3   * Copyright 2018-2021 NXP
4   *   Dong Aisheng <aisheng.dong@nxp.com>
5   */
6  
7  #include <dt-bindings/firmware/imx/rsrc.h>
8  #include <linux/arm-smccc.h>
9  #include <linux/bsearch.h>
10  #include <linux/clk-provider.h>
11  #include <linux/err.h>
12  #include <linux/of.h>
13  #include <linux/platform_device.h>
14  #include <linux/pm_domain.h>
15  #include <linux/pm_runtime.h>
16  #include <linux/slab.h>
17  
18  #include "clk-scu.h"
19  
#define IMX_SIP_CPUFREQ			0xC2000001
#define IMX_SIP_SET_CPUFREQ		0x00

/* IPC handle used for every SCU RPC call issued from this file */
static struct imx_sc_ipc *ccm_ipc_handle;
/* "fsl,scu-pd" node, used later to attach clocks to their power domain */
static struct device_node *pd_np;
static struct platform_driver imx_clk_scu_driver;
/* optional per-SoC table of valid clock resources; NULL means "all valid" */
static const struct imx_clk_scu_rsrc_table *rsrc_table;

/*
 * struct imx_scu_clk_node - bookkeeping for one SCU clock device
 * @name: clock name, also used as the platform device name
 * @rsrc: SCU resource ID of this clock
 * @clk_type: clock type within the resource
 * @parents: parent clock names (NULL for a fixed-parent clock)
 * @num_parents: number of entries in @parents
 * @hw: registered clk_hw, filled in at probe time
 * @node: list head linking into imx_scu_clks[@rsrc]
 */
struct imx_scu_clk_node {
	const char *name;
	u32 rsrc;
	u8 clk_type;
	const char * const *parents;
	int num_parents;

	struct clk_hw *hw;
	struct list_head node;
};

/* per-resource lists of registered SCU clocks, indexed by resource ID */
struct list_head imx_scu_clks[IMX_SC_R_LAST];
40  
/*
 * struct clk_scu - Description of one SCU clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @clk_type: type of this clock resource
 * @parent: parent clk_hw saved at suspend, restored at resume
 * @parent_index: parent mux index saved across suspend/resume
 * @is_enabled: prepare state saved across suspend/resume
 * @rate: clock rate saved across suspend/resume
 */
struct clk_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 clk_type;

	/* for state save&restore */
	struct clk_hw *parent;
	u8 parent_index;
	bool is_enabled;
	u32 rate;
};
58  
/*
 * struct clk_gpr_scu - Description of one SCU GPR clock
 * @hw: the common clk_hw
 * @rsrc_id: resource ID of this SCU clock
 * @gpr_id: GPR ID index to control the divider
 * @flags: IMX_SCU_GPR_CLK_* type flag selecting the clk_ops
 * @gate_invert: true if the GPR bit gates the clock when set
 */
struct clk_gpr_scu {
	struct clk_hw hw;
	u16 rsrc_id;
	u8 gpr_id;
	u8 flags;
	bool gate_invert;
};

#define to_clk_gpr_scu(_hw) container_of(_hw, struct clk_gpr_scu, hw)
74  
/*
 * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
 * @hdr: SCU protocol header
 * @rate: rate to set
 * @resource: clock resource to set rate
 * @clk: clk type of this resource
 *
 * This structure describes the SCU protocol of clock rate set
 */
struct imx_sc_msg_req_set_clock_rate {
	struct imx_sc_rpc_msg hdr;
	__le32 rate;
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* request payload for the get-clock-rate RPC */
struct req_get_clock_rate {
	__le16 resource;
	u8 clk;
} __packed __aligned(4);

/* response payload for the get-clock-rate RPC */
struct resp_get_clock_rate {
	__le32 rate;
};
99  
/*
 * struct imx_sc_msg_get_clock_rate - clock get rate protocol
 * @hdr: SCU protocol header
 * @req: get rate request protocol
 * @resp: get rate response protocol
 *
 * This structure describes the SCU protocol of clock rate get.
 * Request and response share the same buffer, hence the union.
 */
struct imx_sc_msg_get_clock_rate {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_rate req;
		struct resp_get_clock_rate resp;
	} data;
};

/*
 * struct imx_sc_msg_get_clock_parent - clock get parent protocol
 * @hdr: SCU protocol header
 * @req: get parent request protocol
 * @resp: get parent response protocol
 *
 * This structure describes the SCU protocol of clock get parent
 */
struct imx_sc_msg_get_clock_parent {
	struct imx_sc_rpc_msg hdr;
	union {
		struct req_get_clock_parent {
			__le16 resource;
			u8 clk;
		} __packed __aligned(4) req;
		struct resp_get_clock_parent {
			u8 parent;
		} resp;
	} data;
};

/*
 * struct imx_sc_msg_set_clock_parent - clock set parent protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to set parent for
 * @clk: clk type of this resource
 * @parent: parent mux index to select
 *
 * This structure describes the SCU protocol of clock set parent
 */
struct imx_sc_msg_set_clock_parent {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 parent;
} __packed;

/*
 * struct imx_sc_msg_req_clock_enable - clock gate protocol
 * @hdr: SCU protocol header
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: whether gate off the clock
 * @autog: HW auto gate enable
 *
 * This structure describes the SCU protocol of clock gate
 */
struct imx_sc_msg_req_clock_enable {
	struct imx_sc_rpc_msg hdr;
	__le16 resource;
	u8 clk;
	u8 enable;
	u8 autog;
} __packed __aligned(4);
168  
/* Map a generic clk_hw back to its containing clk_scu */
static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
{
	return container_of(hw, struct clk_scu, hw);
}
173  
imx_scu_clk_search_cmp(const void * rsrc,const void * rsrc_p)174  static inline int imx_scu_clk_search_cmp(const void *rsrc, const void *rsrc_p)
175  {
176  	return *(u32 *)rsrc - *(u32 *)rsrc_p;
177  }
178  
imx_scu_clk_is_valid(u32 rsrc_id)179  static bool imx_scu_clk_is_valid(u32 rsrc_id)
180  {
181  	void *p;
182  
183  	if (!rsrc_table)
184  		return true;
185  
186  	p = bsearch(&rsrc_id, rsrc_table->rsrc, rsrc_table->num,
187  		    sizeof(rsrc_table->rsrc[0]), imx_scu_clk_search_cmp);
188  
189  	return p != NULL;
190  }
191  
imx_clk_scu_init(struct device_node * np,const struct imx_clk_scu_rsrc_table * data)192  int imx_clk_scu_init(struct device_node *np,
193  		     const struct imx_clk_scu_rsrc_table *data)
194  {
195  	u32 clk_cells;
196  	int ret, i;
197  
198  	ret = imx_scu_get_handle(&ccm_ipc_handle);
199  	if (ret)
200  		return ret;
201  
202  	of_property_read_u32(np, "#clock-cells", &clk_cells);
203  
204  	if (clk_cells == 2) {
205  		for (i = 0; i < IMX_SC_R_LAST; i++)
206  			INIT_LIST_HEAD(&imx_scu_clks[i]);
207  
208  		/* pd_np will be used to attach power domains later */
209  		pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
210  		if (!pd_np)
211  			return -EINVAL;
212  
213  		rsrc_table = data;
214  	}
215  
216  	return platform_driver_register(&imx_clk_scu_driver);
217  }
218  
219  /*
220   * clk_scu_recalc_rate - Get clock rate for a SCU clock
221   * @hw: clock to get rate for
222   * @parent_rate: parent rate provided by common clock framework, not used
223   *
224   * Gets the current clock rate of a SCU clock. Returns the current
225   * clock rate, or zero in failure.
226   */
clk_scu_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)227  static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
228  					 unsigned long parent_rate)
229  {
230  	struct clk_scu *clk = to_clk_scu(hw);
231  	struct imx_sc_msg_get_clock_rate msg;
232  	struct imx_sc_rpc_msg *hdr = &msg.hdr;
233  	int ret;
234  
235  	hdr->ver = IMX_SC_RPC_VERSION;
236  	hdr->svc = IMX_SC_RPC_SVC_PM;
237  	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
238  	hdr->size = 2;
239  
240  	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
241  	msg.data.req.clk = clk->clk_type;
242  
243  	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
244  	if (ret) {
245  		pr_err("%s: failed to get clock rate %d\n",
246  		       clk_hw_get_name(hw), ret);
247  		return 0;
248  	}
249  
250  	return le32_to_cpu(msg.data.resp.rate);
251  }
252  
253  /*
254   * clk_scu_determine_rate - Returns the closest rate for a SCU clock
255   * @hw: clock to round rate for
256   * @req: clock rate request
257   *
258   * Returns 0 on success, a negative error on failure
259   */
clk_scu_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)260  static int clk_scu_determine_rate(struct clk_hw *hw,
261  				  struct clk_rate_request *req)
262  {
263  	/*
264  	 * Assume we support all the requested rate and let the SCU firmware
265  	 * to handle the left work
266  	 */
267  	return 0;
268  }
269  
270  /*
271   * clk_scu_round_rate - Round clock rate for a SCU clock
272   * @hw: clock to round rate for
273   * @rate: rate to round
274   * @parent_rate: parent rate provided by common clock framework, not used
275   *
276   * Returns the current clock rate, or zero in failure.
277   */
clk_scu_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)278  static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
279  			       unsigned long *parent_rate)
280  {
281  	/*
282  	 * Assume we support all the requested rate and let the SCU firmware
283  	 * to handle the left work
284  	 */
285  	return rate;
286  }
287  
clk_scu_atf_set_cpu_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)288  static int clk_scu_atf_set_cpu_rate(struct clk_hw *hw, unsigned long rate,
289  				    unsigned long parent_rate)
290  {
291  	struct clk_scu *clk = to_clk_scu(hw);
292  	struct arm_smccc_res res;
293  	unsigned long cluster_id;
294  
295  	if (clk->rsrc_id == IMX_SC_R_A35 || clk->rsrc_id == IMX_SC_R_A53)
296  		cluster_id = 0;
297  	else if (clk->rsrc_id == IMX_SC_R_A72)
298  		cluster_id = 1;
299  	else
300  		return -EINVAL;
301  
302  	/* CPU frequency scaling can ONLY be done by ARM-Trusted-Firmware */
303  	arm_smccc_smc(IMX_SIP_CPUFREQ, IMX_SIP_SET_CPUFREQ,
304  		      cluster_id, rate, 0, 0, 0, 0, &res);
305  
306  	return 0;
307  }
308  
309  /*
310   * clk_scu_set_rate - Set rate for a SCU clock
311   * @hw: clock to change rate for
312   * @rate: target rate for the clock
313   * @parent_rate: rate of the clock parent, not used for SCU clocks
314   *
315   * Sets a clock frequency for a SCU clock. Returns the SCU
316   * protocol status.
317   */
clk_scu_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)318  static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
319  			    unsigned long parent_rate)
320  {
321  	struct clk_scu *clk = to_clk_scu(hw);
322  	struct imx_sc_msg_req_set_clock_rate msg;
323  	struct imx_sc_rpc_msg *hdr = &msg.hdr;
324  
325  	hdr->ver = IMX_SC_RPC_VERSION;
326  	hdr->svc = IMX_SC_RPC_SVC_PM;
327  	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
328  	hdr->size = 3;
329  
330  	msg.rate = cpu_to_le32(rate);
331  	msg.resource = cpu_to_le16(clk->rsrc_id);
332  	msg.clk = clk->clk_type;
333  
334  	return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
335  }
336  
/*
 * clk_scu_get_parent - query the current parent mux index from the SCU
 * @hw: clock to get the parent index for
 *
 * Returns the parent index reported by the firmware, or 0 (and logs an
 * error) when the RPC fails.
 */
static u8 clk_scu_get_parent(struct clk_hw *hw)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_get_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int ret;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_PARENT;
	hdr->size = 2;

	msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
	msg.data.req.clk = clk->clk_type;

	ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (ret) {
		pr_err("%s: failed to get clock parent %d\n",
		       clk_hw_get_name(hw), ret);
		return 0;
	}

	/* cache the index so suspend/resume can restore the parent */
	clk->parent_index = msg.data.resp.parent;

	return msg.data.resp.parent;
}
363  
/*
 * clk_scu_set_parent - select a new parent for a SCU clock
 * @hw: clock to reparent
 * @index: parent mux index to select
 *
 * Issues the set-parent RPC and, on success, records the index so that
 * suspend/resume can restore it. Returns the SCU protocol status.
 */
static int clk_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_scu *clk = to_clk_scu(hw);
	struct imx_sc_msg_set_clock_parent msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;
	int err;

	msg.resource = cpu_to_le16(clk->rsrc_id);
	msg.clk = clk->clk_type;
	msg.parent = index;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_PARENT;
	hdr->size = 2;

	err = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
	if (err) {
		pr_err("%s: failed to set clock parent %d\n",
		       clk_hw_get_name(hw), err);
		return err;
	}

	clk->parent_index = index;

	return 0;
}
391  
/*
 * sc_pm_clock_enable - gate or ungate one SCU clock
 * @ipc: IPC handle to issue the RPC on
 * @resource: clock resource to gate
 * @clk: clk type of this resource
 * @enable: true to ungate the clock, false to gate it
 * @autog: HW auto gate enable
 *
 * Returns the SCU protocol status.
 */
static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
			      u8 clk, bool enable, bool autog)
{
	struct imx_sc_msg_req_clock_enable msg;
	struct imx_sc_rpc_msg *hdr = &msg.hdr;

	hdr->ver = IMX_SC_RPC_VERSION;
	hdr->svc = IMX_SC_RPC_SVC_PM;
	hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
	hdr->size = 3;

	msg.resource = cpu_to_le16(resource);
	msg.clk = clk;
	msg.enable = enable;
	msg.autog = autog;

	/*
	 * Use the caller-provided IPC handle instead of silently ignoring
	 * it in favor of the global; every caller in this file passes
	 * ccm_ipc_handle, so behavior is unchanged while the parameter is
	 * no longer dead.
	 */
	return imx_scu_call_rpc(ipc, &msg, true);
}
410  
411  /*
412   * clk_scu_prepare - Enable a SCU clock
413   * @hw: clock to enable
414   *
415   * Enable the clock at the DSC slice level
416   */
clk_scu_prepare(struct clk_hw * hw)417  static int clk_scu_prepare(struct clk_hw *hw)
418  {
419  	struct clk_scu *clk = to_clk_scu(hw);
420  
421  	return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
422  				  clk->clk_type, true, false);
423  }
424  
425  /*
426   * clk_scu_unprepare - Disable a SCU clock
427   * @hw: clock to enable
428   *
429   * Disable the clock at the DSC slice level
430   */
clk_scu_unprepare(struct clk_hw * hw)431  static void clk_scu_unprepare(struct clk_hw *hw)
432  {
433  	struct clk_scu *clk = to_clk_scu(hw);
434  	int ret;
435  
436  	ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
437  				 clk->clk_type, false, false);
438  	if (ret)
439  		pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
440  			ret);
441  }
442  
/* default ops for SCU clocks: full rate/parent/gate control via RPC */
static const struct clk_ops clk_scu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.determine_rate = clk_scu_determine_rate,
	.set_rate = clk_scu_set_rate,
	.get_parent = clk_scu_get_parent,
	.set_parent = clk_scu_set_parent,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* CPU clocks: rate changes must go through ARM-Trusted-Firmware */
static const struct clk_ops clk_scu_cpu_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate = clk_scu_round_rate,
	.set_rate = clk_scu_atf_set_cpu_rate,
	.prepare = clk_scu_prepare,
	.unprepare = clk_scu_unprepare,
};

/* PI PLL: rate control only, no gating or reparenting */
static const struct clk_ops clk_scu_pi_ops = {
	.recalc_rate = clk_scu_recalc_rate,
	.round_rate  = clk_scu_round_rate,
	.set_rate    = clk_scu_set_rate,
};
466  
/*
 * __imx_clk_scu - register one SCU clock with the common clock framework
 * @dev: device registering the clock, may be NULL
 * @name: clock name
 * @parents: parent clock names, may be NULL
 * @num_parents: number of entries in @parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 *
 * Returns the registered clk_hw or an ERR_PTR. On success the clk_scu
 * is stored as @dev's drvdata so suspend/resume can find it.
 */
struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
			     const char * const *parents, int num_parents,
			     u32 rsrc_id, u8 clk_type)
{
	struct clk_init_data init;
	struct clk_scu *clk;
	struct clk_hw *hw;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return ERR_PTR(-ENOMEM);

	clk->rsrc_id = rsrc_id;
	clk->clk_type = clk_type;

	init.name = name;
	/*
	 * CPU clocks must be scaled through ATF and the PI PLL has its own
	 * restricted ops; everything else gets the default SCU ops. The
	 * previous unconditional "init.ops = &clk_scu_ops" before this
	 * chain was a dead store and has been dropped.
	 */
	if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 || rsrc_id == IMX_SC_R_A72)
		init.ops = &clk_scu_cpu_ops;
	else if (rsrc_id == IMX_SC_R_PI_0_PLL)
		init.ops = &clk_scu_pi_ops;
	else
		init.ops = &clk_scu_ops;
	init.parent_names = parents;
	init.num_parents = num_parents;

	/*
	 * Note on MX8, the clocks are tightly coupled with power domain
	 * that once the power domain is off, the clock status may be
	 * lost. So we make it NOCACHE to let user to retrieve the real
	 * clock status from HW instead of using the possible invalid
	 * cached rate.
	 */
	init.flags = CLK_GET_RATE_NOCACHE;
	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(clk);
		return ERR_PTR(ret);
	}

	if (dev)
		dev_set_drvdata(dev, clk);

	return hw;
}
517  
imx_scu_of_clk_src_get(struct of_phandle_args * clkspec,void * data)518  struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
519  				      void *data)
520  {
521  	unsigned int rsrc = clkspec->args[0];
522  	unsigned int idx = clkspec->args[1];
523  	struct list_head *scu_clks = data;
524  	struct imx_scu_clk_node *clk;
525  
526  	list_for_each_entry(clk, &scu_clks[rsrc], node) {
527  		if (clk->clk_type == idx)
528  			return clk->hw;
529  	}
530  
531  	return ERR_PTR(-ENODEV);
532  }
533  
/*
 * imx_clk_scu_probe - probe one clock device created by
 * imx_clk_scu_alloc_dev() and register the actual clk_hw
 * @pdev: platform device carrying an imx_scu_clk_node as platform data
 *
 * Non-CPU clocks are set up for runtime PM with autosuspend so their
 * power domain can be powered down when the clock is idle; the domain
 * is held powered (resume_and_get) across the registration itself.
 * CPU clocks skip runtime PM entirely.
 *
 * Returns 0 on success or a negative error code.
 */
static int imx_clk_scu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct imx_scu_clk_node *clk = dev_get_platdata(dev);
	struct clk_hw *hw;
	int ret;

	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_set_suspended(dev);
		pm_runtime_set_autosuspend_delay(dev, 50);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(dev);

		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			/* undo the genpd attach done at device creation */
			pm_genpd_remove_device(dev);
			pm_runtime_disable(dev);
			return ret;
		}
	}

	hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
			   clk->rsrc, clk->clk_type);
	if (IS_ERR(hw)) {
		pm_runtime_disable(dev);
		return PTR_ERR(hw);
	}

	clk->hw = hw;
	list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);

	/* drop the probe-time PM reference taken above for non-CPU clocks */
	if (!((clk->rsrc == IMX_SC_R_A35) || (clk->rsrc == IMX_SC_R_A53) ||
	    (clk->rsrc == IMX_SC_R_A72))) {
		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);
	}

	dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
		clk->clk_type);

	return 0;
}
577  
imx_clk_scu_suspend(struct device * dev)578  static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
579  {
580  	struct clk_scu *clk = dev_get_drvdata(dev);
581  	u32 rsrc_id = clk->rsrc_id;
582  
583  	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
584  	    (rsrc_id == IMX_SC_R_A72))
585  		return 0;
586  
587  	clk->parent = clk_hw_get_parent(&clk->hw);
588  
589  	/* DC SS needs to handle bypass clock using non-cached clock rate */
590  	if (clk->rsrc_id == IMX_SC_R_DC_0_VIDEO0 ||
591  		clk->rsrc_id == IMX_SC_R_DC_0_VIDEO1 ||
592  		clk->rsrc_id == IMX_SC_R_DC_1_VIDEO0 ||
593  		clk->rsrc_id == IMX_SC_R_DC_1_VIDEO1)
594  		clk->rate = clk_scu_recalc_rate(&clk->hw, 0);
595  	else
596  		clk->rate = clk_hw_get_rate(&clk->hw);
597  	clk->is_enabled = clk_hw_is_prepared(&clk->hw);
598  
599  	if (clk->parent)
600  		dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent),
601  			clk->parent_index);
602  
603  	if (clk->rate)
604  		dev_dbg(dev, "save rate %d\n", clk->rate);
605  
606  	if (clk->is_enabled)
607  		dev_dbg(dev, "save enabled state\n");
608  
609  	return 0;
610  }
611  
imx_clk_scu_resume(struct device * dev)612  static int __maybe_unused imx_clk_scu_resume(struct device *dev)
613  {
614  	struct clk_scu *clk = dev_get_drvdata(dev);
615  	u32 rsrc_id = clk->rsrc_id;
616  	int ret = 0;
617  
618  	if ((rsrc_id == IMX_SC_R_A35) || (rsrc_id == IMX_SC_R_A53) ||
619  	    (rsrc_id == IMX_SC_R_A72))
620  		return 0;
621  
622  	if (clk->parent) {
623  		ret = clk_scu_set_parent(&clk->hw, clk->parent_index);
624  		dev_dbg(dev, "restore parent %s idx %u %s\n",
625  			clk_hw_get_name(clk->parent),
626  			clk->parent_index, !ret ? "success" : "failed");
627  	}
628  
629  	if (clk->rate) {
630  		ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
631  		dev_dbg(dev, "restore rate %d %s\n", clk->rate,
632  			!ret ? "success" : "failed");
633  	}
634  
635  	if (clk->is_enabled && rsrc_id != IMX_SC_R_PI_0_PLL) {
636  		ret = clk_scu_prepare(&clk->hw);
637  		dev_dbg(dev, "restore enabled state %s\n",
638  			!ret ? "success" : "failed");
639  	}
640  
641  	return ret;
642  }
643  
/* noirq sleep ops: save/restore SCU clock state across system suspend */
static const struct dev_pm_ops imx_clk_scu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
				      imx_clk_scu_resume)
};

/* driver bound to the per-clock devices created in imx_clk_scu_alloc_dev() */
static struct platform_driver imx_clk_scu_driver = {
	.driver = {
		.name = "imx-scu-clk",
		.suppress_bind_attrs = true,
		.pm = &imx_clk_scu_pm_ops,
	},
	.probe = imx_clk_scu_probe,
};
657  
/*
 * imx_clk_scu_attach_pd - attach a clock device to its SCU power domain
 * @dev: clock device to attach
 * @rsrc_id: SCU resource ID identifying the power domain
 *
 * CPU clock resources are not attached and succeed immediately.
 */
static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
{
	struct of_phandle_args genpdspec = {
		.np = pd_np,
		.args_count = 1,
		.args[0] = rsrc_id,
	};

	switch (rsrc_id) {
	case IMX_SC_R_A35:
	case IMX_SC_R_A53:
	case IMX_SC_R_A72:
		return 0;
	default:
		return of_genpd_add_device(&genpdspec, dev);
	}
}
672  
/*
 * imx_clk_scu_alloc_dev - create a platform device for one SCU clock
 * @name: clock (and device) name
 * @parents: parent clock names, may be NULL
 * @num_parents: number of entries in @parents
 * @rsrc_id: SCU resource ID of the clock
 * @clk_type: clock type within the resource
 *
 * The device carries an imx_scu_clk_node as platform data and is bound
 * to the "imx-scu-clk" driver, which registers the actual clk_hw in
 * its probe. Returns NULL on success (see note below) or an ERR_PTR.
 */
struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
				     const char * const *parents,
				     int num_parents, u32 rsrc_id, u8 clk_type)
{
	struct imx_scu_clk_node clk = {
		.name = name,
		.rsrc = rsrc_id,
		.clk_type = clk_type,
		.parents = parents,
		.num_parents = num_parents,
	};
	struct platform_device *pdev;
	int ret;

	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev) {
		pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
		       name, rsrc_id, clk_type);
		return ERR_PTR(-ENOMEM);
	}

	/* the node is copied into the device's platform data */
	ret = platform_device_add_data(pdev, &clk, sizeof(clk));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	/* force binding to the imx-scu-clk driver regardless of dev name */
	ret = driver_set_override(&pdev->dev, &pdev->driver_override,
				  "imx-scu-clk", strlen("imx-scu-clk"));
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	/* best-effort: a missing power domain only produces a warning */
	ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
	if (ret)
		pr_warn("%s: failed to attached the power domain %d\n",
			name, ret);

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ERR_PTR(ret);
	}

	/* For API backwards compatibility, simply return NULL for success */
	return NULL;
}
724  
imx_clk_scu_unregister(void)725  void imx_clk_scu_unregister(void)
726  {
727  	struct imx_scu_clk_node *clk, *n;
728  	int i;
729  
730  	for (i = 0; i < IMX_SC_R_LAST; i++) {
731  		list_for_each_entry_safe(clk, n, &imx_scu_clks[i], node) {
732  			clk_hw_unregister(clk->hw);
733  			kfree(clk);
734  		}
735  	}
736  }
737  
clk_gpr_div_scu_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)738  static unsigned long clk_gpr_div_scu_recalc_rate(struct clk_hw *hw,
739  						 unsigned long parent_rate)
740  {
741  	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
742  	unsigned long rate = 0;
743  	u32 val;
744  	int err;
745  
746  	err = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
747  				      clk->gpr_id, &val);
748  
749  	rate  = val ? parent_rate / 2 : parent_rate;
750  
751  	return err ? 0 : rate;
752  }
753  
/*
 * clk_gpr_div_scu_round_rate - round to the two rates the divider offers
 * @hw: clock to round rate for, unused
 * @rate: requested rate
 * @prate: parent rate
 *
 * The divider only supports bypass (*prate) or divide-by-two; anything
 * below the parent rate rounds to *prate / 2.
 */
static long clk_gpr_div_scu_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	return (rate < *prate) ? *prate / 2 : *prate;
}
764  
clk_gpr_div_scu_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)765  static int clk_gpr_div_scu_set_rate(struct clk_hw *hw, unsigned long rate,
766  				    unsigned long parent_rate)
767  {
768  	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
769  	uint32_t val;
770  	int err;
771  
772  	val = (rate < parent_rate) ? 1 : 0;
773  	err = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
774  				      clk->gpr_id, val);
775  
776  	return err ? -EINVAL : 0;
777  }
778  
/* ops for GPR-bit controlled /1 or /2 dividers */
static const struct clk_ops clk_gpr_div_scu_ops = {
	.recalc_rate = clk_gpr_div_scu_recalc_rate,
	.round_rate = clk_gpr_div_scu_round_rate,
	.set_rate = clk_gpr_div_scu_set_rate,
};
784  
/*
 * clk_gpr_mux_scu_get_parent - read the mux selector from the SCU GPR
 * @hw: clock to get the parent index for
 *
 * SCU errors are deliberately ignored; val is pre-set to 0 so index 0
 * is reported in that case.
 */
static u8 clk_gpr_mux_scu_get_parent(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
	u32 val = 0;

	imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
				clk->gpr_id, &val);

	return (u8)val;
}
795  
/*
 * clk_gpr_mux_scu_set_parent - write the mux selector into the SCU GPR
 * @hw: clock to reparent
 * @index: parent mux index to select
 *
 * Returns the SCU control call status.
 */
static int clk_gpr_mux_scu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, index);
}
803  
/* ops for GPR-controlled muxes; rate requests never trigger reparenting */
static const struct clk_ops clk_gpr_mux_scu_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_gpr_mux_scu_get_parent,
	.set_parent = clk_gpr_mux_scu_set_parent,
};
809  
/*
 * clk_gpr_gate_scu_prepare - ungate a GPR-controlled clock
 * @hw: clock to enable
 *
 * Writes the ungate value (inverted when @gate_invert is set) into the
 * clock's GPR bit. Returns the SCU control call status.
 */
static int clk_gpr_gate_scu_prepare(struct clk_hw *hw)
{
	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);

	return imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
				       clk->gpr_id, !clk->gate_invert);
}
817  
clk_gpr_gate_scu_unprepare(struct clk_hw * hw)818  static void clk_gpr_gate_scu_unprepare(struct clk_hw *hw)
819  {
820  	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
821  	int ret;
822  
823  	ret = imx_sc_misc_set_control(ccm_ipc_handle, clk->rsrc_id,
824  				      clk->gpr_id, clk->gate_invert);
825  	if (ret)
826  		pr_err("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
827  		       ret);
828  }
829  
clk_gpr_gate_scu_is_prepared(struct clk_hw * hw)830  static int clk_gpr_gate_scu_is_prepared(struct clk_hw *hw)
831  {
832  	struct clk_gpr_scu *clk = to_clk_gpr_scu(hw);
833  	int ret;
834  	u32 val;
835  
836  	ret = imx_sc_misc_get_control(ccm_ipc_handle, clk->rsrc_id,
837  				      clk->gpr_id, &val);
838  	if (ret)
839  		return ret;
840  
841  	return clk->gate_invert ? !val : val;
842  }
843  
/* ops for GPR-bit controlled gates */
static const struct clk_ops clk_gpr_gate_scu_ops = {
	.prepare = clk_gpr_gate_scu_prepare,
	.unprepare = clk_gpr_gate_scu_unprepare,
	.is_prepared = clk_gpr_gate_scu_is_prepared,
};
849  
/*
 * __imx_clk_gpr_scu - register one GPR-controlled SCU clock
 * @name: clock name
 * @parent_name: parent clock names
 * @num_parents: number of entries in @parent_name
 * @rsrc_id: SCU resource ID of the clock
 * @gpr_id: GPR control index within the resource
 * @flags: exactly one IMX_SCU_GPR_CLK_* flag selecting gate/div/mux ops
 * @invert: gate polarity (only meaningful for gate clocks)
 *
 * Returns the registered clk_hw or an ERR_PTR.
 */
struct clk_hw *__imx_clk_gpr_scu(const char *name, const char * const *parent_name,
				 int num_parents, u32 rsrc_id, u8 gpr_id, u8 flags,
				 bool invert)
{
	struct imx_scu_clk_node *clk_node;
	struct clk_gpr_scu *clk;
	struct clk_hw *hw;
	struct clk_init_data init;
	int ret;

	if (rsrc_id >= IMX_SC_R_LAST || gpr_id >= IMX_SC_C_LAST)
		return ERR_PTR(-EINVAL);

	/*
	 * Reject unknown clock types up front: without one of these flags
	 * init.ops would be left uninitialized below and clk_hw_register()
	 * would dereference a garbage pointer.
	 */
	if (!(flags & (IMX_SCU_GPR_CLK_GATE | IMX_SCU_GPR_CLK_DIV |
		       IMX_SCU_GPR_CLK_MUX)))
		return ERR_PTR(-EINVAL);

	/* validate before allocating so no freeing is needed on this path */
	if (!imx_scu_clk_is_valid(rsrc_id))
		return ERR_PTR(-EINVAL);

	clk_node = kzalloc(sizeof(*clk_node), GFP_KERNEL);
	if (!clk_node)
		return ERR_PTR(-ENOMEM);

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk) {
		kfree(clk_node);
		return ERR_PTR(-ENOMEM);
	}

	clk->rsrc_id = rsrc_id;
	clk->gpr_id = gpr_id;
	clk->flags = flags;
	clk->gate_invert = invert;

	/* last matching flag wins, preserving the original priority order */
	if (flags & IMX_SCU_GPR_CLK_GATE)
		init.ops = &clk_gpr_gate_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_DIV)
		init.ops = &clk_gpr_div_scu_ops;

	if (flags & IMX_SCU_GPR_CLK_MUX)
		init.ops = &clk_gpr_mux_scu_ops;

	init.flags = 0;
	init.name = name;
	init.parent_names = parent_name;
	init.num_parents = num_parents;

	clk->hw.init = &init;

	hw = &clk->hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(clk);
		kfree(clk_node);
		hw = ERR_PTR(ret);
	} else {
		clk_node->hw = hw;
		clk_node->clk_type = gpr_id;
		list_add_tail(&clk_node->node, &imx_scu_clks[rsrc_id]);
	}

	return hw;
}
913