// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, Linaro Ltd
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

struct glink_smem_pipe {
	struct qcom_glink_pipe native;

	__le32 *tail;
	__le32 *head;

	void *fifo;

	int remote_pid;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	if (!pipe->fifo) {
		fifo = qcom_smem_get(pipe->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
			       void *data, unsigned int offset, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	/* Copy up to the end of the ring, then wrap to the start if needed */
	len = min_t(size_t, count, pipe->native.length - tail);
	if (len)
		memcpy_fromio(data, pipe->fifo + tail, len);

	if (len != count)
		memcpy_fromio(data + len, pipe->fifo, (count - len));
}

static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	/* Reserve space so the TX FIFO never fills up completely */
	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}
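/*
 * Copy one buffer into the TX FIFO at @head, wrapping around to the start
 * of the ring when the data crosses the end, and return the new head index.
 */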
static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

static void qcom_glink_smem_release(struct device *dev)
{
	kfree(dev);
}
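/**
 * qcom_glink_smem_register() - register a GLINK edge backed by SMEM FIFOs
 * @parent: parent device of the edge
 * @node: device_node describing the edge, including "qcom,remote-pid"
 *
 * Registers a child device for the edge, allocates the shared descriptor
 * and the TX FIFO in SMEM, wires up the pipe operations (the RX FIFO is
 * looked up lazily on first use) and hands both pipes to
 * qcom_glink_native_probe().
 *
 * Return: a qcom_glink handle on success, an ERR_PTR() on failure.
 */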
struct qcom_glink *qcom_glink_smem_register(struct device *parent,
					    struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%pOFn:%pOFn", node->parent, node);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peak = glink_smem_rx_peak;
	rx_pipe->native.advance = glink_smem_rx_advance;
	rx_pipe->remote_pid = remote_pid;

	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->remote_pid = remote_pid;

	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_put_dev;
	}

	return glink;

err_put_dev:
	device_unregister(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

void qcom_glink_smem_unregister(struct qcom_glink *glink)
{
	qcom_glink_native_remove(glink);
	qcom_glink_native_unregister(glink);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");
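/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical
 * parent driver, e.g. a remoteproc edge driver, would typically register the
 * edge once the remote processor is up and tear it down when it stops:
 *
 *	struct qcom_glink *edge;
 *
 *	edge = qcom_glink_smem_register(parent_dev, edge_node);
 *	if (IS_ERR(edge))
 *		return PTR_ERR(edge);
 *
 *	...
 *
 *	qcom_glink_smem_unregister(edge);
 */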