// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message SMC/HVC
 * Transport driver
 *
 * Copyright 2020 NXP
 */

#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"

/*
 * The shmem address is split into a 4K page number and an offset within
 * that page. This makes sure the parameters fit in the 32-bit arguments
 * of the smc/hvc call, keeping the interface uniform across the smc32
 * and smc64 conventions. It does, however, limit the shmem address to
 * 44 bits.
 *
 * These optional parameters can also be used to distinguish among
 * multiple SCMI instances that are using the same smc-id.
 * The page parameter is passed in the r1/x1/w1 register and the offset
 * parameter is passed in the r2/x2/w2 register.
 */

#define SHMEM_SIZE (SZ_4K)
#define SHMEM_SHIFT 12
#define SHMEM_PAGE(x) (_UL((x) >> SHMEM_SHIFT))
#define SHMEM_OFFSET(x) ((x) & (SHMEM_SIZE - 1))
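
/*
 * Worked example (hypothetical address): for a shmem channel at physical
 * address 0x8f7f0800, SHMEM_PAGE() yields 0x8f7f0 and SHMEM_OFFSET()
 * yields 0x800; the receiver can rebuild the address as
 * (page << SHMEM_SHIFT) | offset.
 */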

/**
 * struct scmi_smc - Structure representing a SCMI smc transport
 *
 * @irq: An optional IRQ for completion
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @shmem_lock: Lock to protect access to Tx/Rx shared memory area.
 *		Used when NOT operating in atomic mode.
 * @inflight: Atomic flag to protect access to Tx/Rx shared memory area.
 *	      Used when operating in atomic mode.
 * @func_id: smc/hvc call function id
 * @param_page: 4K page number of the shmem channel
 * @param_offset: Offset within the 4K page of the shmem channel
 */
struct scmi_smc {
	int irq;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	/* Protect access to shmem area */
	struct mutex shmem_lock;
/* No xfer seq number can ever be MSG_TOKEN_MAX, so it marks an idle channel */
#define INFLIGHT_NONE	MSG_TOKEN_MAX
	atomic_t inflight;
	u32 func_id;
	u32 param_page;
	u32 param_offset;
};

static irqreturn_t smc_msg_done_isr(int irq, void *data)
{
	struct scmi_smc *scmi_info = data;

	scmi_rx_callback(scmi_info->cinfo,
			 shmem_read_header(scmi_info->shmem), NULL);

	return IRQ_HANDLED;
}

static bool smc_chan_available(struct device_node *of_node, int idx)
{
	struct device_node *np = of_parse_phandle(of_node, "shmem", 0);

	if (!np)
		return false;

	of_node_put(np);
	return true;
}

static inline void smc_channel_lock_init(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_init(&scmi_info->shmem_lock);
}

static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight)
{
	int ret;

	ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq);

	return ret == INFLIGHT_NONE;
}

static inline void
smc_channel_lock_acquire(struct scmi_smc *scmi_info,
			 struct scmi_xfer *xfer __maybe_unused)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight));
	else
		mutex_lock(&scmi_info->shmem_lock);
}

static inline void smc_channel_lock_release(struct scmi_smc *scmi_info)
{
	if (IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE))
		atomic_set(&scmi_info->inflight, INFLIGHT_NONE);
	else
		mutex_unlock(&scmi_info->shmem_lock);
}
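
/*
 * A rough sketch of the atomic-mode channel claim, assuming two CPUs
 * racing to send (the seq numbers are illustrative):
 *
 *   CPU0: smc_xfer_inflight() cmpxchg(INFLIGHT_NONE -> seq 5) succeeds
 *   CPU1: smc_xfer_inflight() reads 5 != INFLIGHT_NONE and spins
 *   CPU0: SMC call completes; smc_channel_lock_release() stores
 *         INFLIGHT_NONE back
 *   CPU1: cmpxchg(INFLIGHT_NONE -> seq 6) now succeeds and CPU1 owns
 *         the shmem channel
 */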

static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			  bool tx)
{
	struct device *cdev = cinfo->dev;
	struct scmi_smc *scmi_info;
	resource_size_t size;
	struct resource res;
	struct device_node *np;
	u32 func_id;
	int ret;

	if (!tx)
		return -ENODEV;

	scmi_info = devm_kzalloc(dev, sizeof(*scmi_info), GFP_KERNEL);
	if (!scmi_info)
		return -ENOMEM;

	np = of_parse_phandle(cdev->of_node, "shmem", 0);
	if (!of_device_is_compatible(np, "arm,scmi-shmem")) {
		of_node_put(np);
		return -ENXIO;
	}

	ret = of_address_to_resource(np, 0, &res);
	of_node_put(np);
	if (ret) {
		dev_err(cdev, "failed to get SCMI Tx shared memory\n");
		return ret;
	}

	size = resource_size(&res);
	scmi_info->shmem = devm_ioremap(dev, res.start, size);
	if (!scmi_info->shmem) {
		dev_err(dev, "failed to ioremap SCMI Tx shared memory\n");
		return -EADDRNOTAVAIL;
	}

	ret = of_property_read_u32(dev->of_node, "arm,smc-id", &func_id);
	if (ret < 0)
		return ret;

	if (of_device_is_compatible(dev->of_node, "arm,scmi-smc-param")) {
		scmi_info->param_page = SHMEM_PAGE(res.start);
		scmi_info->param_offset = SHMEM_OFFSET(res.start);
	}
	/*
	 * If there is an interrupt named "a2p", then the service and
	 * completion of a message are signaled by that interrupt rather
	 * than by the return of the SMC call.
	 */
	scmi_info->irq = of_irq_get_byname(cdev->of_node, "a2p");
	if (scmi_info->irq > 0) {
		ret = request_irq(scmi_info->irq, smc_msg_done_isr,
				  IRQF_NO_SUSPEND, dev_name(dev), scmi_info);
		if (ret) {
			dev_err(dev, "failed to setup SCMI smc irq\n");
			return ret;
		}
	} else {
		cinfo->no_completion_irq = true;
	}

	scmi_info->func_id = func_id;
	scmi_info->cinfo = cinfo;
	smc_channel_lock_init(scmi_info);
	cinfo->transport_info = scmi_info;

	return 0;
}
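
/*
 * A minimal devicetree fragment matching what smc_chan_setup() parses
 * (node names and values are hypothetical, shown for illustration only):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi-smc";
 *			arm,smc-id = <0x82000002>;
 *			shmem = <&cpu_scp_shmem>;
 *			interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
 *			interrupt-names = "a2p";
 *		};
 *	};
 */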

static int smc_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_smc *scmi_info = cinfo->transport_info;

	/*
	 * Different protocols might share the same chan info, so a previous
	 * smc_chan_free call might have already freed the structure.
	 */
	if (!scmi_info)
		return 0;

	/* Ignore any possible further reception on the IRQ path */
	if (scmi_info->irq > 0)
		free_irq(scmi_info->irq, scmi_info);

	cinfo->transport_info = NULL;
	scmi_info->cinfo = NULL;

	return 0;
}

static int smc_send_message(struct scmi_chan_info *cinfo,
			    struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;
	struct arm_smccc_res res;
	unsigned long page = scmi_info->param_page;
	unsigned long offset = scmi_info->param_offset;

	/*
	 * The channel is released only once the response has been fully
	 * retrieved, i.e. after .mark_txdone().
	 */
	smc_channel_lock_acquire(scmi_info, xfer);

	shmem_tx_prepare(scmi_info->shmem, xfer, cinfo);

	arm_smccc_1_1_invoke(scmi_info->func_id, page, offset, 0, 0, 0, 0, 0,
			     &res);

	/* Only SMCCC_RET_NOT_SUPPORTED is a valid error code */
	if (res.a0) {
		smc_channel_lock_release(scmi_info);
		return -EOPNOTSUPP;
	}

	return 0;
}
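
/*
 * Note on the transaction lifecycle (a summary of this file's logic, not
 * a spec quotation): the core calls .send_message(), which claims the
 * channel and issues the SMC/HVC; completion is detected either by the
 * synchronous SMC return or by the optional "a2p" interrupt; the core
 * then calls .fetch_response() to read the reply from shmem and finally
 * .mark_txdone(), which releases the channel for the next xfer.
 */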

static void smc_fetch_response(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	shmem_fetch_response(scmi_info->shmem, xfer);
}

static void smc_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			    struct scmi_xfer *__unused)
{
	struct scmi_smc *scmi_info = cinfo->transport_info;

	smc_channel_lock_release(scmi_info);
}

static const struct scmi_transport_ops scmi_smc_ops = {
	.chan_available = smc_chan_available,
	.chan_setup = smc_chan_setup,
	.chan_free = smc_chan_free,
	.send_message = smc_send_message,
	.mark_txdone = smc_mark_txdone,
	.fetch_response = smc_fetch_response,
};

const struct scmi_desc scmi_smc_desc = {
	.ops = &scmi_smc_ops,
	.max_rx_timeout_ms = 30,
	.max_msg = 20,
	.max_msg_size = 128,
	/*
	 * Setting .sync_cmds_completed_on_ret to true for SMC assumes that,
	 * once the SMC instruction has completed successfully, the issued
	 * SCMI command will have been fully processed by the SCMI platform
	 * firmware, so any response value expected for the issued command
	 * will be immediately ready to be fetched from the shared memory
	 * area.
	 */
	.sync_cmds_completed_on_ret = true,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_SMC_ATOMIC_ENABLE),
};
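
/*
 * This descriptor is picked up by the SCMI core at probe time: the
 * "arm,scmi-smc" (and "arm,scmi-smc-param") compatible entries in the
 * scmi_of_match table in driver.c point their .data at scmi_smc_desc.
 */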