// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, Linaro Ltd
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

struct glink_smem_pipe {
	struct qcom_glink_pipe native;

	__le32 *tail;
	__le32 *head;

	void *fifo;

	int remote_pid;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	if (!pipe->fifo) {
		fifo = qcom_smem_get(pipe->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
			       void *data, unsigned int offset, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);
	tail += offset;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	len = min_t(size_t, count, pipe->native.length - tail);
	if (len) {
		__ioread32_copy(data, pipe->fifo + tail,
				len / sizeof(u32));
	}

	if (len != count) {
		__ioread32_copy(data + len, pipe->fifo,
				(count - len) / sizeof(u32));
	}
}

static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}

static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

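/*
 * Copy hdr and data into the outgoing FIFO, then publish the new head
 * index after a write barrier so the remote processor observes the FIFO
 * contents before it sees the index update.
 */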
static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo and head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

static void qcom_glink_smem_release(struct device *dev)
{
	kfree(dev);
}

struct qcom_glink *qcom_glink_smem_register(struct device *parent,
					    struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%s", node->parent->name, node->name);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peak = glink_smem_rx_peak;
	rx_pipe->native.advance = glink_smem_rx_advance;
	rx_pipe->remote_pid = remote_pid;

	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->remote_pid = remote_pid;

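	/* Reset the locally owned indices (RX tail and TX head) */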
	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_put_dev;
	}

	return glink;

err_put_dev:
	device_unregister(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

void qcom_glink_smem_unregister(struct qcom_glink *glink)
{
	qcom_glink_native_remove(glink);
	qcom_glink_native_unregister(glink);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");