1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * System Control and Management Interface (SCMI) Clock Protocol
4  *
5  * Copyright (C) 2018 ARM Ltd.
6  */
7 
8 #include "common.h"
9 
/*
 * Clock-protocol-specific message IDs. IDs 0x0-0x2 are the common
 * commands (version/attributes/message-attributes) shared by all SCMI
 * protocols and are defined elsewhere.
 */
enum scmi_clock_protocol_cmd {
	CLOCK_ATTRIBUTES = 0x3,
	CLOCK_DESCRIBE_RATES = 0x4,
	CLOCK_RATE_SET = 0x5,
	CLOCK_RATE_GET = 0x6,
	CLOCK_CONFIG_SET = 0x7,
};
17 
/* Response payload of PROTOCOL_ATTRIBUTES, wire (little-endian) layout */
struct scmi_msg_resp_clock_protocol_attributes {
	__le16 num_clocks;	/* number of clocks exposed by the platform */
	u8 max_async_req;	/* max concurrent async rate-set requests */
	u8 reserved;
};
23 
24 struct scmi_msg_resp_clock_attributes {
25 	__le32 attributes;
26 #define	CLOCK_ENABLE	BIT(0)
27 	    u8 name[SCMI_MAX_STR_SIZE];
28 };
29 
/* Request payload of CLOCK_CONFIG_SET */
struct scmi_clock_set_config {
	__le32 id;		/* clock identifier */
	__le32 attributes;	/* new config, e.g. CLOCK_ENABLE bit */
};
34 
/* Request payload of CLOCK_DESCRIBE_RATES */
struct scmi_msg_clock_describe_rates {
	__le32 id;		/* clock identifier */
	__le32 rate_index;	/* number of rates already read/to skip */
};
39 
40 struct scmi_msg_resp_clock_describe_rates {
41 	__le32 num_rates_flags;
42 #define NUM_RETURNED(x)		((x) & 0xfff)
43 #define RATE_DISCRETE(x)	!((x) & BIT(12))
44 #define NUM_REMAINING(x)	((x) >> 16)
45 	struct {
46 		__le32 value_low;
47 		__le32 value_high;
48 	} rate[0];
49 #define RATE_TO_U64(X)		\
50 ({				\
51 	typeof(X) x = (X);	\
52 	le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
53 })
54 };
55 
/* Request payload of CLOCK_RATE_SET */
struct scmi_clock_set_rate {
	__le32 flags;
#define CLOCK_SET_ASYNC		BIT(0)	/* complete asynchronously */
#define CLOCK_SET_DELAYED	BIT(1)	/* send delayed response when done */
#define CLOCK_SET_ROUND_UP	BIT(2)	/* round up if rate not supported */
#define CLOCK_SET_ROUND_AUTO	BIT(3)	/* platform chooses rounding */
	__le32 id;		/* clock identifier */
	__le32 value_low;	/* lower 32 bits of the rate in Hz */
	__le32 value_high;	/* upper 32 bits of the rate in Hz */
};
66 
/* Per-handle private data for the clock protocol */
struct clock_info {
	int num_clocks;		/* clocks reported by PROTOCOL_ATTRIBUTES */
	int max_async_req;	/* max concurrent async rate-set requests */
	struct scmi_clock_info *clk;	/* array of num_clocks entries */
};
72 
73 static int scmi_clock_protocol_attributes_get(const struct scmi_handle *handle,
74 					      struct clock_info *ci)
75 {
76 	int ret;
77 	struct scmi_xfer *t;
78 	struct scmi_msg_resp_clock_protocol_attributes *attr;
79 
80 	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
81 				 SCMI_PROTOCOL_CLOCK, 0, sizeof(*attr), &t);
82 	if (ret)
83 		return ret;
84 
85 	attr = t->rx.buf;
86 
87 	ret = scmi_do_xfer(handle, t);
88 	if (!ret) {
89 		ci->num_clocks = le16_to_cpu(attr->num_clocks);
90 		ci->max_async_req = attr->max_async_req;
91 	}
92 
93 	scmi_xfer_put(handle, t);
94 	return ret;
95 }
96 
97 static int scmi_clock_attributes_get(const struct scmi_handle *handle,
98 				     u32 clk_id, struct scmi_clock_info *clk)
99 {
100 	int ret;
101 	struct scmi_xfer *t;
102 	struct scmi_msg_resp_clock_attributes *attr;
103 
104 	ret = scmi_xfer_get_init(handle, CLOCK_ATTRIBUTES, SCMI_PROTOCOL_CLOCK,
105 				 sizeof(clk_id), sizeof(*attr), &t);
106 	if (ret)
107 		return ret;
108 
109 	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
110 	attr = t->rx.buf;
111 
112 	ret = scmi_do_xfer(handle, t);
113 	if (!ret)
114 		strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
115 	else
116 		clk->name[0] = '\0';
117 
118 	scmi_xfer_put(handle, t);
119 	return ret;
120 }
121 
/*
 * Retrieve the rates supported by clock @clk_id via CLOCK_DESCRIBE_RATES
 * and fill @clk. Discrete rates may span several messages, so the request
 * is repeated with an increasing rate_index until the platform reports
 * nothing remaining. A continuous range is described by a single
 * {min, max, step} triplet and needs no iteration.
 */
static int
scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
			      struct scmi_clock_info *clk)
{
	u64 *rate;
	int ret, cnt;
	bool rate_discrete = false;
	u32 tot_rate_cnt = 0, rates_flag;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_msg_clock_describe_rates *clk_desc;
	struct scmi_msg_resp_clock_describe_rates *rlist;

	ret = scmi_xfer_get_init(handle, CLOCK_DESCRIBE_RATES,
				 SCMI_PROTOCOL_CLOCK, sizeof(*clk_desc), 0, &t);
	if (ret)
		return ret;

	clk_desc = t->tx.buf;
	rlist = t->rx.buf;

	do {
		clk_desc->id = cpu_to_le32(clk_id);
		/* Set the number of rates to be skipped/already read */
		clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			goto err;

		/* Decode counters and discrete/range flag from one word */
		rates_flag = le32_to_cpu(rlist->num_rates_flags);
		num_remaining = NUM_REMAINING(rates_flag);
		rate_discrete = RATE_DISCRETE(rates_flag);
		num_returned = NUM_RETURNED(rates_flag);

		/* Cap at the driver-side table size; keep what fits so far */
		if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
			dev_err(handle->dev, "No. of rates > MAX_NUM_RATES");
			break;
		}

		if (!rate_discrete) {
			/* Range form: rate[] holds min, max and step */
			clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
			clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
			clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
			dev_dbg(handle->dev, "Min %llu Max %llu Step %llu Hz\n",
				clk->range.min_rate, clk->range.max_rate,
				clk->range.step_size);
			break;
		}

		/* Append this batch after the rates read so far */
		rate = &clk->list.rates[tot_rate_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, rate++) {
			*rate = RATE_TO_U64(rlist->rate[cnt]);
			dev_dbg(handle->dev, "Rate %llu Hz\n", *rate);
		}

		tot_rate_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	if (rate_discrete)
		clk->list.num_rates = tot_rate_cnt;

	clk->rate_discrete = rate_discrete;

err:
	scmi_xfer_put(handle, t);
	return ret;
}
194 
195 static int
196 scmi_clock_rate_get(const struct scmi_handle *handle, u32 clk_id, u64 *value)
197 {
198 	int ret;
199 	struct scmi_xfer *t;
200 
201 	ret = scmi_xfer_get_init(handle, CLOCK_RATE_GET, SCMI_PROTOCOL_CLOCK,
202 				 sizeof(__le32), sizeof(u64), &t);
203 	if (ret)
204 		return ret;
205 
206 	*(__le32 *)t->tx.buf = cpu_to_le32(clk_id);
207 
208 	ret = scmi_do_xfer(handle, t);
209 	if (!ret) {
210 		__le32 *pval = t->rx.buf;
211 
212 		*value = le32_to_cpu(*pval);
213 		*value |= (u64)le32_to_cpu(*(pval + 1)) << 32;
214 	}
215 
216 	scmi_xfer_put(handle, t);
217 	return ret;
218 }
219 
220 static int scmi_clock_rate_set(const struct scmi_handle *handle, u32 clk_id,
221 			       u32 config, u64 rate)
222 {
223 	int ret;
224 	struct scmi_xfer *t;
225 	struct scmi_clock_set_rate *cfg;
226 
227 	ret = scmi_xfer_get_init(handle, CLOCK_RATE_SET, SCMI_PROTOCOL_CLOCK,
228 				 sizeof(*cfg), 0, &t);
229 	if (ret)
230 		return ret;
231 
232 	cfg = t->tx.buf;
233 	cfg->flags = cpu_to_le32(config);
234 	cfg->id = cpu_to_le32(clk_id);
235 	cfg->value_low = cpu_to_le32(rate & 0xffffffff);
236 	cfg->value_high = cpu_to_le32(rate >> 32);
237 
238 	ret = scmi_do_xfer(handle, t);
239 
240 	scmi_xfer_put(handle, t);
241 	return ret;
242 }
243 
244 static int
245 scmi_clock_config_set(const struct scmi_handle *handle, u32 clk_id, u32 config)
246 {
247 	int ret;
248 	struct scmi_xfer *t;
249 	struct scmi_clock_set_config *cfg;
250 
251 	ret = scmi_xfer_get_init(handle, CLOCK_CONFIG_SET, SCMI_PROTOCOL_CLOCK,
252 				 sizeof(*cfg), 0, &t);
253 	if (ret)
254 		return ret;
255 
256 	cfg = t->tx.buf;
257 	cfg->id = cpu_to_le32(clk_id);
258 	cfg->attributes = cpu_to_le32(config);
259 
260 	ret = scmi_do_xfer(handle, t);
261 
262 	scmi_xfer_put(handle, t);
263 	return ret;
264 }
265 
/* Enable clock @clk_id by setting the CLOCK_ENABLE attribute bit */
static int scmi_clock_enable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, CLOCK_ENABLE);
}
270 
/* Disable clock @clk_id by clearing all configuration attribute bits */
static int scmi_clock_disable(const struct scmi_handle *handle, u32 clk_id)
{
	return scmi_clock_config_set(handle, clk_id, 0);
}
275 
/* Return the number of clocks discovered at protocol init time */
static int scmi_clock_count_get(const struct scmi_handle *handle)
{
	struct clock_info *ci = handle->clk_priv;

	return ci->num_clocks;
}
282 
283 static const struct scmi_clock_info *
284 scmi_clock_info_get(const struct scmi_handle *handle, u32 clk_id)
285 {
286 	struct clock_info *ci = handle->clk_priv;
287 	struct scmi_clock_info *clk = ci->clk + clk_id;
288 
289 	if (!clk->name[0])
290 		return NULL;
291 
292 	return clk;
293 }
294 
/* Operations exported to clock users through handle->clk_ops */
static struct scmi_clk_ops clk_ops = {
	.count_get = scmi_clock_count_get,
	.info_get = scmi_clock_info_get,
	.rate_get = scmi_clock_rate_get,
	.rate_set = scmi_clock_rate_set,
	.enable = scmi_clock_enable,
	.disable = scmi_clock_disable,
};
303 
304 static int scmi_clock_protocol_init(struct scmi_handle *handle)
305 {
306 	u32 version;
307 	int clkid, ret;
308 	struct clock_info *cinfo;
309 
310 	scmi_version_get(handle, SCMI_PROTOCOL_CLOCK, &version);
311 
312 	dev_dbg(handle->dev, "Clock Version %d.%d\n",
313 		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
314 
315 	cinfo = devm_kzalloc(handle->dev, sizeof(*cinfo), GFP_KERNEL);
316 	if (!cinfo)
317 		return -ENOMEM;
318 
319 	scmi_clock_protocol_attributes_get(handle, cinfo);
320 
321 	cinfo->clk = devm_kcalloc(handle->dev, cinfo->num_clocks,
322 				  sizeof(*cinfo->clk), GFP_KERNEL);
323 	if (!cinfo->clk)
324 		return -ENOMEM;
325 
326 	for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
327 		struct scmi_clock_info *clk = cinfo->clk + clkid;
328 
329 		ret = scmi_clock_attributes_get(handle, clkid, clk);
330 		if (!ret)
331 			scmi_clock_describe_rates_get(handle, clkid, clk);
332 	}
333 
334 	handle->clk_ops = &clk_ops;
335 	handle->clk_priv = cinfo;
336 
337 	return 0;
338 }
339 
/* Register the clock protocol with the SCMI core at subsystem init */
static int __init scmi_clock_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_CLOCK,
				      &scmi_clock_protocol_init);
}
subsys_initcall(scmi_clock_init);
346