// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = dev,				\
		.needs_free = false,			\
	}
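
/*
 * Example (illustrative sketch, not part of this driver): a caller that
 * wants a stack-allocated request paired with an on-stack completion can
 * expand the macro the same way rpmh_write() does below. The resource
 * address 0x30000 is a made-up placeholder.
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 *
 *	rpm_msg.cmd[0].addr = 0x30000;
 *	rpm_msg.cmd[0].data = 1;
 *	rpm_msg.msg.num_cmds = 1;
 */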

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req - the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};

/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

/* Look up the rpmh_ctrlr embedded in the RSC DRV that is this device's parent */
static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg, int r)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;

	rpm_msg->err = r;

	if (r)
		dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
			rpm_msg->msg.cmds[0].addr, r);

	/* Signal the blocking thread we are done */
	if (compl)
		complete(compl);

	if (rpm_msg->needs_free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	/* GFP_ATOMIC: we are allocating under the cache_lock spinlock */
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	INIT_LIST_HEAD(&req->list);
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
		/*
		 * An active-only vote also becomes the wake vote once a
		 * sleep vote exists, so the resource is restored to the
		 * active value when the system wakes up.
		 */
		if (req->sleep_val != UINT_MAX)
			req->wake_val = cmd->data;
		break;
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	default:
		break;
	}

	ctrlr->dirty = true;
unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}
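
/*
 * Illustration (not part of the driver): suppose a client first makes a
 * sleep vote of 0 for resource address 0x30000 (a made-up address) and
 * then an active-only vote of 3 for the same address. The cache_req for
 * 0x30000 then holds sleep_val == 0 and wake_val == 3, is_req_valid()
 * below is true, and rpmh_flush() programs both values into the
 * sleep/wake TCSes.
 */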

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	rpm_msg->msg.state = state;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		WARN_ON(irqs_disabled());
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
				&rpm_msg->msg);
		/* Clean up our call by spoofing tx_done */
		rpmh_tx_done(&rpm_msg->msg, ret);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
		const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot. The caller is not notified
 * of completion; the request is freed by the tx_done callback.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
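
/*
 * Usage sketch (illustrative, not part of this driver): fire-and-forget a
 * wake vote from a client driver; the address 0x30010 is a placeholder.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30010,
 *		.data = 0x1,
 *	};
 *	int ret = rpmh_write_async(dev, RPMH_WAKE_ONLY_STATE, &cmd, 1);
 *
 *	if (ret)
 *		dev_err(dev, "failed to queue wake vote: %d\n", ret);
 */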

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
	rpm_msg.msg.num_cmds = n;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
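
/*
 * Usage sketch (illustrative, not part of this driver): make a blocking
 * active-only request for two resources; both addresses are placeholders.
 *
 *	struct tcs_cmd cmds[2] = {
 *		{ .addr = 0x30000, .data = 0x4 },
 *		{ .addr = 0x30010, .data = 0x1 },
 *	};
 *	int ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, cmds,
 *			     ARRAY_SIZE(cmds));
 *
 *	if (ret)
 *		dev_err(dev, "active request failed: %d\n", ret);
 */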

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	unsigned long flags;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return ret;
}

static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE_ONLY, then the requests are treated as completion
 * requests and sent to the controller immediately. The function waits
 * until all the commands are complete. If the request was to SLEEP or
 * WAKE_ONLY, then the request is sent as fire-and-forget and no ack is
 * expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	DECLARE_COMPLETION_ONSTACK(compl);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i, j;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
		      GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		rpm_msgs[i].completion = &compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			for (j = i; j < count; j++)
				rpmh_tx_done(&rpm_msgs[j].msg, ret);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	for (i = 0; i < count; i++) {
		time_left = wait_for_completion_timeout(&compl, time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion on our stack and that's bad once
			 * we've returned from the function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(req);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
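
/*
 * Usage sketch (illustrative, not part of this driver): send two batches
 * of commands in one shot. The n[] array gives the command count of each
 * batch and must be zero-terminated; all addresses are placeholders.
 *
 *	struct tcs_cmd cmds[3] = {
 *		{ .addr = 0x30000, .data = 0x2 },
 *		{ .addr = 0x30010, .data = 0x1 },
 *		{ .addr = 0x30020, .data = 0x1 },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	int ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */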

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @dev: The device making the request
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to an RPMH request sent earlier.
 *
 * This function is always called from the sleep code from the last CPU
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
{
	struct cache_req *p;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		return 0;
	}

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		return ret;

	/*
	 * Nobody else should be calling this function other than system PM,
	 * hence we can run without locks.
	 */
	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
				  p->addr, p->wake_val);
		if (ret)
			return ret;
	}

	ctrlr->dirty = false;

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);
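
/*
 * Flow sketch (illustrative): the platform sleep code on the last CPU
 * going down is expected to call rpmh_flush() so the aggregated sleep and
 * wake votes reach the TCSes before the RSC hands control to hardware:
 *
 *	if (rpmh_flush(dev))
 *		pr_err("rpmh: could not flush sleep/wake votes\n");
 */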

/**
 * rpmh_invalidate: Invalidate all sleep and active sets
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and active values in the TCS blocks.
 */
int rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	invalidate_batch(ctrlr);
	ctrlr->dirty = true;

	do {
		ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
	} while (ret == -EAGAIN);

	return ret;
}
EXPORT_SYMBOL(rpmh_invalidate);
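
/*
 * Usage sketch (illustrative): a client rebuilding its sleep/wake votes
 * from scratch would first invalidate, then write the new sets, and rely
 * on the PM path to flush them later. sleep_cmds/wake_cmds and their
 * counts are hypothetical buffers owned by the client.
 *
 *	rpmh_invalidate(dev);
 *	rpmh_write(dev, RPMH_SLEEP_STATE, sleep_cmds, num_sleep);
 *	rpmh_write(dev, RPMH_WAKE_ONLY_STATE, wake_cmds, num_wake);
 */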