// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/sort.h>

#include "common.h"

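/* Performance protocol command (message) IDs, as numbered by the SCMI spec */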
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
};

struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_MAX_STR_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[0];
};

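/*
 * Per-domain information cached from firmware; mult_factor converts an
 * abstract performance level into a frequency in Hz (see how it is
 * derived in scmi_perf_domain_attributes_get() below).
 */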
struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
};

struct scmi_perf_info {
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};

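/*
 * Query the protocol-level attributes: the number of performance
 * domains, the power scale in use and the location/size of the shared
 * statistics memory region, if any.
 */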
static int scmi_perf_attributes_get(const struct scmi_handle *handle,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, 0, sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);
		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int
scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
				struct perf_dom_info *dom_info)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = scmi_xfer_get_init(handle, PERF_DOMAIN_ATTRIBUTES,
				 SCMI_PROTOCOL_PERF, sizeof(domain),
				 sizeof(*attr), &t);
	if (ret)
		return ret;

	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
	attr = t->rx.buf;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
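		/*
		 * mult_factor converts a performance level into Hz:
		 * sustained_freq_khz * 1000 is the frequency in Hz at
		 * the sustained level, so dividing by that level gives
		 * Hz per unit of performance level.
		 */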
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}

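/*
 * Query the list of performance levels (OPPs) for a domain.  Firmware
 * may return the levels in batches, so keep issuing DESCRIBE_LEVELS
 * with an updated level_index until none remain.
 */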
static int
scmi_perf_describe_levels_get(const struct scmi_handle *handle, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = scmi_xfer_get_init(handle, PERF_DESCRIBE_LEVELS,
				 SCMI_PROTOCOL_PERF, sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = scmi_do_xfer(handle, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(handle->dev, "No. of OPPs exceeded MAX_OPPS\n");
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu(
				level_info->opp[cnt].transition_latency_us);

			dev_dbg(handle->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	scmi_xfer_put(handle, t);

	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}

251 
252 static int scmi_perf_limits_set(const struct scmi_handle *handle, u32 domain,
253 				u32 max_perf, u32 min_perf)
254 {
255 	int ret;
256 	struct scmi_xfer *t;
257 	struct scmi_perf_set_limits *limits;
258 
259 	ret = scmi_xfer_get_init(handle, PERF_LIMITS_SET, SCMI_PROTOCOL_PERF,
260 				 sizeof(*limits), 0, &t);
261 	if (ret)
262 		return ret;
263 
264 	limits = t->tx.buf;
265 	limits->domain = cpu_to_le32(domain);
266 	limits->max_level = cpu_to_le32(max_perf);
267 	limits->min_level = cpu_to_le32(min_perf);
268 
269 	ret = scmi_do_xfer(handle, t);
270 
271 	scmi_xfer_put(handle, t);
272 	return ret;
273 }
274 
275 static int scmi_perf_limits_get(const struct scmi_handle *handle, u32 domain,
276 				u32 *max_perf, u32 *min_perf)
277 {
278 	int ret;
279 	struct scmi_xfer *t;
280 	struct scmi_perf_get_limits *limits;
281 
282 	ret = scmi_xfer_get_init(handle, PERF_LIMITS_GET, SCMI_PROTOCOL_PERF,
283 				 sizeof(__le32), 0, &t);
284 	if (ret)
285 		return ret;
286 
287 	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
288 
289 	ret = scmi_do_xfer(handle, t);
290 	if (!ret) {
291 		limits = t->rx.buf;
292 
293 		*max_perf = le32_to_cpu(limits->max_level);
294 		*min_perf = le32_to_cpu(limits->min_level);
295 	}
296 
297 	scmi_xfer_put(handle, t);
298 	return ret;
299 }
300 
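/*
 * When 'poll' is true the transfer is completed by polling the channel
 * instead of waiting for the completion interrupt, which lets callers
 * in atomic context (e.g. cpufreq fast switching) use these paths.
 */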
static int scmi_perf_level_set(const struct scmi_handle *handle, u32 domain,
			       u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_SET, SCMI_PROTOCOL_PERF,
				 sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = scmi_do_xfer(handle, t);

	scmi_xfer_put(handle, t);
	return ret;
}

static int scmi_perf_level_get(const struct scmi_handle *handle, u32 domain,
			       u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PERF_LEVEL_GET, SCMI_PROTOCOL_PERF,
				 sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	*(__le32 *)t->tx.buf = cpu_to_le32(domain);

	ret = scmi_do_xfer(handle, t);
	if (!ret)
		*level = le32_to_cpu(*(__le32 *)t->rx.buf);

	scmi_xfer_put(handle, t);
	return ret;
}

/* Device specific ops */
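/*
 * The performance domain of a device is encoded in the first cell of
 * its "clocks" phandle arguments (see the arm,scmi devicetree binding).
 */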
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}

static int scmi_dvfs_device_opps_add(const struct scmi_handle *handle,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}

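/*
 * Report the transition latency in ns for the highest performance
 * level; OPPs are sorted by level, so this is the last array entry.
 */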
static int scmi_dvfs_transition_latency_get(const struct scmi_handle *handle,
					    struct device *dev)
{
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = handle->perf_priv;
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	/* us to ns */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int scmi_dvfs_freq_set(const struct scmi_handle *handle, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(handle, domain, freq / dom->mult_factor,
				   poll);
}

static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(handle, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

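/*
 * Estimate the power cost of running at (at least) *freq: pick the
 * lowest OPP whose frequency is >= *freq and report that OPP's
 * frequency and power; OPPs are sorted in ascending order of level.
 */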
static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain,
				   unsigned long *freq, unsigned long *power)
{
	struct scmi_perf_info *pi = handle->perf_priv;
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	if (domain >= pi->num_domains)
		return -EIO;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		opp_freq = opp->perf * dom->mult_factor;
		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

456 
457 static struct scmi_perf_ops perf_ops = {
458 	.limits_set = scmi_perf_limits_set,
459 	.limits_get = scmi_perf_limits_get,
460 	.level_set = scmi_perf_level_set,
461 	.level_get = scmi_perf_level_get,
462 	.device_domain_id = scmi_dev_domain_id,
463 	.transition_latency_get = scmi_dvfs_transition_latency_get,
464 	.device_opps_add = scmi_dvfs_device_opps_add,
465 	.freq_set = scmi_dvfs_freq_set,
466 	.freq_get = scmi_dvfs_freq_get,
467 	.est_power_get = scmi_dvfs_est_power_get,
468 };
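
/*
 * A minimal usage sketch (illustrative only; 'cpu_dev' stands for a
 * consumer's struct device, error handling omitted): a client such as
 * the scmi-cpufreq driver drives a domain through these ops via the
 * SCMI handle, e.g.
 *
 *	int domain = handle->perf_ops->device_domain_id(cpu_dev);
 *	unsigned long rate;
 *
 *	handle->perf_ops->device_opps_add(handle, cpu_dev);
 *	handle->perf_ops->freq_set(handle, domain, 1200000000UL, false);
 *	handle->perf_ops->freq_get(handle, domain, &rate, false);
 */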

static int scmi_perf_protocol_init(struct scmi_handle *handle)
{
	int ret, domain;
	u32 version;
	struct scmi_perf_info *pinfo;

	scmi_version_get(handle, SCMI_PROTOCOL_PERF, &version);

	dev_dbg(handle->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(handle->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	ret = scmi_perf_attributes_get(handle, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(handle->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		scmi_perf_domain_attributes_get(handle, domain, dom);
		scmi_perf_describe_levels_get(handle, domain, dom);
	}

	handle->perf_ops = &perf_ops;
	handle->perf_priv = pinfo;

	return 0;
}

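/*
 * Register the init routine with the SCMI core, which invokes it for
 * platforms that implement the performance protocol.
 */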
static int __init scmi_perf_init(void)
{
	return scmi_protocol_register(SCMI_PROTOCOL_PERF,
				      &scmi_perf_protocol_init);
}
subsys_initcall(scmi_perf_init);