/*
 * Copyright (c) 2016, Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
#include <linux/rpmsg.h>
#include <linux/idr.h>
#include <linux/circ_buf.h>
#include <linux/soc/qcom/smem.h>
#include <linux/sizes.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
#include <linux/list.h>

#include <linux/rpmsg/qcom_glink.h>

#include "qcom_glink_native.h"

#define FIFO_FULL_RESERVE 8
#define FIFO_ALIGNMENT 8
#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */

#define SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR	478
#define SMEM_GLINK_NATIVE_XPRT_FIFO_0		479
#define SMEM_GLINK_NATIVE_XPRT_FIFO_1		480

struct glink_smem_pipe {
	struct qcom_glink_pipe native;

	__le32 *tail;
	__le32 *head;

	void *fifo;

	int remote_pid;
};

#define to_smem_pipe(p) container_of(p, struct glink_smem_pipe, native)

static size_t glink_smem_rx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	void *fifo;
	u32 head;
	u32 tail;

	/* Acquire the RX fifo handle lazily, on first use */
	if (!pipe->fifo) {
		fifo = qcom_smem_get(pipe->remote_pid,
				     SMEM_GLINK_NATIVE_XPRT_FIFO_1, &len);
		if (IS_ERR(fifo)) {
			pr_err("failed to acquire RX fifo handle: %ld\n",
			       PTR_ERR(fifo));
			return 0;
		}

		pipe->fifo = fifo;
		pipe->native.length = len;
	}

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (head < tail)
		return pipe->native.length - tail + head;
	else
		return head - tail;
}

static void glink_smem_rx_peak(struct qcom_glink_pipe *np,
			       void *data, size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	size_t len;
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	/* Copy up to the end of the fifo, then wrap around to its start */
	len = min_t(size_t, count, pipe->native.length - tail);
	if (len) {
		__ioread32_copy(data, pipe->fifo + tail,
				len / sizeof(u32));
	}

	if (len != count) {
		__ioread32_copy(data + len, pipe->fifo,
				(count - len) / sizeof(u32));
	}
}

static void glink_smem_rx_advance(struct qcom_glink_pipe *np,
				  size_t count)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 tail;

	tail = le32_to_cpu(*pipe->tail);

	tail += count;
	if (tail >= pipe->native.length)
		tail -= pipe->native.length;

	*pipe->tail = cpu_to_le32(tail);
}

static size_t glink_smem_tx_avail(struct qcom_glink_pipe *np)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(np);
	u32 head;
	u32 tail;
	u32 avail;

	head = le32_to_cpu(*pipe->head);
	tail = le32_to_cpu(*pipe->tail);

	if (tail <= head)
		avail = pipe->native.length - head + tail;
	else
		avail = tail - head;

	/* Reserve room so the remote can always post a read notification */
	if (avail < (FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE))
		avail = 0;
	else
		avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;

	return avail;
}

static unsigned int glink_smem_tx_write_one(struct glink_smem_pipe *pipe,
					    unsigned int head,
					    const void *data, size_t count)
{
	size_t len;

	/* Write up to the end of the fifo, then wrap around to its start */
	len = min_t(size_t, count, pipe->native.length - head);
	if (len)
		memcpy(pipe->fifo + head, data, len);

	if (len != count)
		memcpy(pipe->fifo, data + len, count - len);

	head += count;
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	return head;
}

static void glink_smem_tx_write(struct qcom_glink_pipe *glink_pipe,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	struct glink_smem_pipe *pipe = to_smem_pipe(glink_pipe);
	unsigned int head;

	head = le32_to_cpu(*pipe->head);

	head = glink_smem_tx_write_one(pipe, head, hdr, hlen);
	head = glink_smem_tx_write_one(pipe, head, data, dlen);

	/* Ensure head is always aligned to 8 bytes */
	head = ALIGN(head, 8);
	if (head >= pipe->native.length)
		head -= pipe->native.length;

	/* Ensure ordering of fifo writes and the head update */
	wmb();

	*pipe->head = cpu_to_le32(head);
}

static void qcom_glink_smem_release(struct device *dev)
{
	kfree(dev);
}

struct qcom_glink *qcom_glink_smem_register(struct device *parent,
					    struct device_node *node)
{
	struct glink_smem_pipe *rx_pipe;
	struct glink_smem_pipe *tx_pipe;
	struct qcom_glink *glink;
	struct device *dev;
	u32 remote_pid;
	__le32 *descs;
	size_t size;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->parent = parent;
	dev->of_node = node;
	dev->release = qcom_glink_smem_release;
	dev_set_name(dev, "%s:%s", node->parent->name, node->name);
	ret = device_register(dev);
	if (ret) {
		pr_err("failed to register glink edge\n");
		put_device(dev);
		return ERR_PTR(ret);
	}

	ret = of_property_read_u32(dev->of_node, "qcom,remote-pid",
				   &remote_pid);
	if (ret) {
		dev_err(dev, "failed to parse qcom,remote-pid\n");
		goto err_put_dev;
	}

	rx_pipe = devm_kzalloc(dev, sizeof(*rx_pipe), GFP_KERNEL);
	tx_pipe = devm_kzalloc(dev, sizeof(*tx_pipe), GFP_KERNEL);
	if (!rx_pipe || !tx_pipe) {
		ret = -ENOMEM;
		goto err_put_dev;
	}

	ret = qcom_smem_alloc(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, 32);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate glink descriptors\n");
		goto err_put_dev;
	}

	descs = qcom_smem_get(remote_pid,
			      SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, &size);
	if (IS_ERR(descs)) {
		dev_err(dev, "failed to acquire xprt descriptor\n");
		ret = PTR_ERR(descs);
		goto err_put_dev;
	}

	if (size != 32) {
		dev_err(dev, "glink descriptor of invalid size\n");
		ret = -EINVAL;
		goto err_put_dev;
	}

	/* The shared descriptor holds the tx tail/head and rx tail/head indices */
	tx_pipe->tail = &descs[0];
	tx_pipe->head = &descs[1];
	rx_pipe->tail = &descs[2];
	rx_pipe->head = &descs[3];

	ret = qcom_smem_alloc(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
			      SZ_16K);
	if (ret && ret != -EEXIST) {
		dev_err(dev, "failed to allocate TX fifo\n");
		goto err_put_dev;
	}

	tx_pipe->fifo = qcom_smem_get(remote_pid, SMEM_GLINK_NATIVE_XPRT_FIFO_0,
				      &tx_pipe->native.length);
	if (IS_ERR(tx_pipe->fifo)) {
		dev_err(dev, "failed to acquire TX fifo\n");
		ret = PTR_ERR(tx_pipe->fifo);
		goto err_put_dev;
	}

	rx_pipe->native.avail = glink_smem_rx_avail;
	rx_pipe->native.peak = glink_smem_rx_peak;
	rx_pipe->native.advance = glink_smem_rx_advance;
	rx_pipe->remote_pid = remote_pid;

	tx_pipe->native.avail = glink_smem_tx_avail;
	tx_pipe->native.write = glink_smem_tx_write;
	tx_pipe->remote_pid = remote_pid;

	*rx_pipe->tail = 0;
	*tx_pipe->head = 0;

	glink = qcom_glink_native_probe(dev,
					GLINK_FEATURE_INTENT_REUSE,
					&rx_pipe->native, &tx_pipe->native,
					false);
	if (IS_ERR(glink)) {
		ret = PTR_ERR(glink);
		goto err_put_dev;
	}

	return glink;

err_put_dev:
	put_device(dev);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_register);

void qcom_glink_smem_unregister(struct qcom_glink *glink)
{
	qcom_glink_native_remove(glink);
	qcom_glink_native_unregister(glink);
}
EXPORT_SYMBOL_GPL(qcom_glink_smem_unregister);

MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
MODULE_DESCRIPTION("Qualcomm GLINK SMEM driver");
MODULE_LICENSE("GPL v2");