xref: /openbmc/linux/drivers/clk/clk-bulk.c (revision 009cb7d5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2017 NXP
4  *
5  * Dong Aisheng <aisheng.dong@nxp.com>
6  */
7 
8 #include <linux/clk.h>
9 #include <linux/clk-provider.h>
10 #include <linux/device.h>
11 #include <linux/export.h>
12 #include <linux/of.h>
13 #include <linux/slab.h>
14 
15 static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
16 					struct clk_bulk_data *clks)
17 {
18 	int ret;
19 	int i;
20 
21 	for (i = 0; i < num_clks; i++)
22 		clks[i].clk = NULL;
23 
24 	for (i = 0; i < num_clks; i++) {
25 		clks[i].clk = of_clk_get(np, i);
26 		if (IS_ERR(clks[i].clk)) {
27 			ret = PTR_ERR(clks[i].clk);
28 			pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
29 			       np, i, ret);
30 			clks[i].clk = NULL;
31 			goto err;
32 		}
33 	}
34 
35 	return 0;
36 
37 err:
38 	clk_bulk_put(i, clks);
39 
40 	return ret;
41 }
42 
43 static int __must_check of_clk_bulk_get_all(struct device_node *np,
44 					    struct clk_bulk_data **clks)
45 {
46 	struct clk_bulk_data *clk_bulk;
47 	int num_clks;
48 	int ret;
49 
50 	num_clks = of_clk_get_parent_count(np);
51 	if (!num_clks)
52 		return 0;
53 
54 	clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
55 	if (!clk_bulk)
56 		return -ENOMEM;
57 
58 	ret = of_clk_bulk_get(np, num_clks, clk_bulk);
59 	if (ret) {
60 		kfree(clk_bulk);
61 		return ret;
62 	}
63 
64 	*clks = clk_bulk;
65 
66 	return num_clks;
67 }
68 
69 void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
70 {
71 	while (--num_clks >= 0) {
72 		clk_put(clks[num_clks].clk);
73 		clks[num_clks].clk = NULL;
74 	}
75 }
76 EXPORT_SYMBOL_GPL(clk_bulk_put);
77 
78 static int __clk_bulk_get(struct device *dev, int num_clks,
79 			  struct clk_bulk_data *clks, bool optional)
80 {
81 	int ret;
82 	int i;
83 
84 	for (i = 0; i < num_clks; i++)
85 		clks[i].clk = NULL;
86 
87 	for (i = 0; i < num_clks; i++) {
88 		clks[i].clk = clk_get(dev, clks[i].id);
89 		if (IS_ERR(clks[i].clk)) {
90 			ret = PTR_ERR(clks[i].clk);
91 			clks[i].clk = NULL;
92 
93 			if (ret == -ENOENT && optional)
94 				continue;
95 
96 			if (ret != -EPROBE_DEFER)
97 				dev_err(dev, "Failed to get clk '%s': %d\n",
98 					clks[i].id, ret);
99 			goto err;
100 		}
101 	}
102 
103 	return 0;
104 
105 err:
106 	clk_bulk_put(i, clks);
107 
108 	return ret;
109 }
110 
/*
 * clk_bulk_get - get a table of clocks for a device; all must exist
 *
 * Thin wrapper around __clk_bulk_get() with optional=false, i.e. every
 * requested clock must be present or the whole call fails.
 */
int __must_check clk_bulk_get(struct device *dev, int num_clks,
			      struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, false);
}
EXPORT_SYMBOL(clk_bulk_get);
117 
/*
 * clk_bulk_get_optional - get a table of clocks, tolerating missing ones
 *
 * Thin wrapper around __clk_bulk_get() with optional=true: a clock that
 * does not exist (-ENOENT) leaves its .clk member NULL instead of failing
 * the whole call.
 */
int __must_check clk_bulk_get_optional(struct device *dev, int num_clks,
				       struct clk_bulk_data *clks)
{
	return __clk_bulk_get(dev, num_clks, clks, true);
}
EXPORT_SYMBOL_GPL(clk_bulk_get_optional);
124 
/*
 * clk_bulk_put_all - release clocks and the table obtained from
 *                    clk_bulk_get_all()
 * @num_clks: number of entries in @clks
 * @clks:     table to put and free; NULL/ERR pointers are tolerated
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	/* Tolerate callers passing an error pointer or nothing at all. */
	if (IS_ERR_OR_NULL(clks))
		return;

	clk_bulk_put(num_clks, clks);
	kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);
135 
/*
 * clk_bulk_get_all - get all clocks referenced by a device's DT node
 *
 * Allocates and fills a clk_bulk_data table via of_clk_bulk_get_all().
 * Returns the number of clocks found (0 when the device has no OF node),
 * or a negative errno. Free the table with clk_bulk_put_all().
 */
int __must_check clk_bulk_get_all(struct device *dev,
				  struct clk_bulk_data **clks)
{
	struct device_node *np = dev_of_node(dev);

	if (!np)
		return 0;

	return of_clk_bulk_get_all(np, clks);
}
EXPORT_SYMBOL(clk_bulk_get_all);
147 
148 #ifdef CONFIG_HAVE_CLK_PREPARE
149 
/**
 * clk_bulk_unprepare - undo preparation of a set of clock sources
 * @num_clks: the number of clk_bulk_data
 * @clks: the clk_bulk_data table being unprepared
 *
 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
 */
void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
{
	while (--num_clks >= 0)
		clk_unprepare(clks[num_clks].clk);
}
EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
164 
165 /**
166  * clk_bulk_prepare - prepare a set of clocks
167  * @num_clks: the number of clk_bulk_data
168  * @clks: the clk_bulk_data table being prepared
169  *
170  * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
171  * Returns 0 on success, -EERROR otherwise.
172  */
173 int __must_check clk_bulk_prepare(int num_clks,
174 				  const struct clk_bulk_data *clks)
175 {
176 	int ret;
177 	int i;
178 
179 	for (i = 0; i < num_clks; i++) {
180 		ret = clk_prepare(clks[i].clk);
181 		if (ret) {
182 			pr_err("Failed to prepare clk '%s': %d\n",
183 				clks[i].id, ret);
184 			goto err;
185 		}
186 	}
187 
188 	return 0;
189 
190 err:
191 	clk_bulk_unprepare(i, clks);
192 
193 	return  ret;
194 }
195 EXPORT_SYMBOL_GPL(clk_bulk_prepare);
196 
197 #endif /* CONFIG_HAVE_CLK_PREPARE */
198 
199 /**
200  * clk_bulk_disable - gate a set of clocks
201  * @num_clks: the number of clk_bulk_data
202  * @clks: the clk_bulk_data table being gated
203  *
204  * clk_bulk_disable must not sleep, which differentiates it from
205  * clk_bulk_unprepare. clk_bulk_disable must be called before
206  * clk_bulk_unprepare.
207  */
208 void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
209 {
210 
211 	while (--num_clks >= 0)
212 		clk_disable(clks[num_clks].clk);
213 }
214 EXPORT_SYMBOL_GPL(clk_bulk_disable);
215 
216 /**
217  * clk_bulk_enable - ungate a set of clocks
218  * @num_clks: the number of clk_bulk_data
219  * @clks: the clk_bulk_data table being ungated
220  *
221  * clk_bulk_enable must not sleep
222  * Returns 0 on success, -EERROR otherwise.
223  */
224 int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
225 {
226 	int ret;
227 	int i;
228 
229 	for (i = 0; i < num_clks; i++) {
230 		ret = clk_enable(clks[i].clk);
231 		if (ret) {
232 			pr_err("Failed to enable clk '%s': %d\n",
233 				clks[i].id, ret);
234 			goto err;
235 		}
236 	}
237 
238 	return 0;
239 
240 err:
241 	clk_bulk_disable(i, clks);
242 
243 	return  ret;
244 }
245 EXPORT_SYMBOL_GPL(clk_bulk_enable);
246