// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <soc/qcom/rpmh.h>

#include "rpmh-internal.h"

#define RPMH_TIMEOUT_MS			msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name)	\
	struct rpmh_request name = {			\
		.msg = {				\
			.state = s,			\
			.cmds = name.cmd,		\
			.num_cmds = 0,			\
			.wait_for_compl = true,		\
		},					\
		.cmd = { { 0 } },			\
		.completion = q,			\
		.dev = device,				\
		.needs_free = false,			\
	}
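
/*
 * Illustrative sketch (not part of the driver): a caller wanting a
 * synchronous request could pair the macro with an on-stack completion,
 * assuming a valid device pointer @dev:
 *
 *	DECLARE_COMPLETION_ONSTACK(compl);
 *	DEFINE_RPMH_MSG_ONSTACK(dev, RPMH_ACTIVE_ONLY_STATE, &compl, rpm_msg);
 *
 * rpmh_write() below does exactly this before filling in the payload.
 */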

#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)

/**
 * struct cache_req: the request object for caching
 *
 * @addr: the address of the resource
 * @sleep_val: the sleep vote
 * @wake_val: the wake vote
 * @list: linked list obj
 */
struct cache_req {
	u32 addr;
	u32 sleep_val;
	u32 wake_val;
	struct list_head list;
};
/**
 * struct batch_cache_req - An entry in our batch cache
 *
 * @list: linked list obj
 * @count: number of messages
 * @rpm_msgs: the messages
 */
struct batch_cache_req {
	struct list_head list;
	int count;
	struct rpmh_request rpm_msgs[];
};

static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
	struct rsc_drv *drv = dev_get_drvdata(dev->parent);

	return &drv->client;
}

void rpmh_tx_done(const struct tcs_request *msg)
{
	struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
						    msg);
	struct completion *compl = rpm_msg->completion;
	bool free = rpm_msg->needs_free;

	if (!compl)
		goto exit;

	/* Signal the blocking thread we are done */
	complete(compl);

exit:
	if (free)
		kfree(rpm_msg);
}

static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
	struct cache_req *p, *req = NULL;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (p->addr == addr) {
			req = p;
			break;
		}
	}

	return req;
}

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;
	u32 old_sleep_val, old_wake_val;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	req->addr = cmd->addr;
	req->sleep_val = req->wake_val = UINT_MAX;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	old_sleep_val = req->sleep_val;
	old_wake_val = req->wake_val;

	switch (state) {
	case RPMH_ACTIVE_ONLY_STATE:
	case RPMH_WAKE_ONLY_STATE:
		req->wake_val = cmd->data;
		break;
	case RPMH_SLEEP_STATE:
		req->sleep_val = cmd->data;
		break;
	}

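	/*
	 * Mark the controller dirty only when this request actually changed
	 * a vote and both the sleep and wake values are set (i.e. neither
	 * is still the UINT_MAX "unset" sentinel); only then is there a
	 * complete, changed sleep/wake pair worth re-flushing.
	 */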
	ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
			 req->wake_val != old_wake_val) &&
			 req->sleep_val != UINT_MAX &&
			 req->wake_val != UINT_MAX;

unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);

	return req;
}

/**
 * __rpmh_write: Cache and send the RPMH request
 *
 * @dev: The device making the request
 * @state: Active/Sleep request type
 * @rpm_msg: The data that needs to be sent (cmds).
 *
 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
 * SLEEP/WAKE_ONLY requests are not sent to the controller at
 * this time. Use rpmh_flush() to send them to the controller.
 */
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
			struct rpmh_request *rpm_msg)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret = -EINVAL;
	struct cache_req *req;
	int i;

	/* Cache the request in our store and link the payload */
	for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
		req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
		if (IS_ERR(req))
			return PTR_ERR(req);
	}

	if (state == RPMH_ACTIVE_ONLY_STATE) {
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
	} else {
		/* Clean up our call by spoofing tx_done */
		ret = 0;
		rpmh_tx_done(&rpm_msg->msg);
	}

	return ret;
}

static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
			   const struct tcs_cmd *cmd, u32 n)
{
	if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
		return -EINVAL;

	memcpy(req->cmd, cmd, n * sizeof(*cmd));

	req->msg.state = state;
	req->msg.cmds = req->cmd;
	req->msg.num_cmds = n;

	return 0;
}

/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands; the order of commands is maintained
 * and they will be sent as a single shot.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL(rpmh_write_async);
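
/*
 * Illustrative sketch (not part of the driver): a client could queue a
 * fire-and-forget active vote like this, where RES_ADDR and RES_VAL are
 * hypothetical resource address and vote values:
 *
 *	struct tcs_cmd cmd = { .addr = RES_ADDR, .data = RES_VAL };
 *
 *	ret = rpmh_write_async(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *
 * The request is heap-allocated above and freed by rpmh_tx_done() once the
 * controller completes it, so the caller neither waits nor cleans up.
 */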

/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
	if (ret)
		return ret;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL(rpmh_write);
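
/*
 * Illustrative sketch (not part of the driver): the blocking variant of the
 * call above, again with hypothetical RES_ADDR/RES_VAL values. Unlike
 * rpmh_write_async(), this sleeps until the controller acks the request or
 * RPMH_TIMEOUT_MS expires:
 *
 *	struct tcs_cmd cmd = { .addr = RES_ADDR, .data = RES_VAL };
 *	int ret;
 *
 *	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
 *	if (ret)
 *		return ret;
 */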

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_add_tail(&req->list, &ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}

/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of counts of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion requests
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
EXPORT_SYMBOL(rpmh_write_batch);
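
/*
 * Illustrative sketch (not part of the driver): a client batching two
 * requests, one of two commands and one of a single command, with the
 * count array terminated by 0. The RES_*/VAL_* names are hypothetical:
 *
 *	struct tcs_cmd cmds[3] = {
 *		{ .addr = RES_A, .data = VAL_A },
 *		{ .addr = RES_B, .data = VAL_B },
 *		{ .addr = RES_C, .data = VAL_C },
 *	};
 *	u32 n[] = { 2, 1, 0 };
 *
 *	ret = rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
 */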

static int is_req_valid(struct cache_req *req)
{
	return (req->sleep_val != UINT_MAX &&
		req->wake_val != UINT_MAX &&
		req->sleep_val != req->wake_val);
}

static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
	rpm_msg.cmd[0].addr = addr;
	rpm_msg.cmd[0].data = data;
	rpm_msg.msg.num_cmds = 1;

	return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}

/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto write_next_wakeup;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

write_next_wakeup:
	rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}
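
/*
 * Illustrative sketch (not part of the driver): per the lockdep assertion
 * above, rpmh_flush() must be called with interrupts disabled, e.g. from
 * the RSC driver's power-down path on what is believed to be the last
 * active CPU, roughly:
 *
 *	local_irq_save(flags);
 *	ret = rpmh_flush(ctrlr);
 *	local_irq_restore(flags);
 *
 * A -EBUSY return means another CPU raced in and the flush was abandoned.
 */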

/**
 * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
 *
 * @dev: The device making the request
 *
 * Invalidate the sleep and wake values in batch_cache.
 */
void rpmh_invalidate(const struct device *dev)
{
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	struct batch_cache_req *req, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
		kfree(req);
	INIT_LIST_HEAD(&ctrlr->batch_cache);
	ctrlr->dirty = true;
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL(rpmh_invalidate);