// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	return container_of(vdev->dev.parent, struct rproc_vdev, dev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available. See the illustrative sketch at the end of this file.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int len, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	len = rvring->len;

	/* zero vring */
	size = vring_size(len, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, len, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 const bool *ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features = rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs = rproc_virtio_find_vqs,
	.del_vqs = rproc_virtio_del_vqs,
	.reset = rproc_virtio_reset,
	.set_status = rproc_virtio_set_status,
	.get_status = rproc_virtio_get_status,
	.get = rproc_virtio_get,
	.set = rproc_virtio_set,
};

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount, which was taken when
 * the vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);

	kfree(vdev);

	kref_put(&rvdev->refcount, rproc_vdev_release);

	put_device(&rproc->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use dma address as carveout is not mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							  mem->da,
							  mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device = id;
	vdev->config = &rproc_virtio_config_ops;
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because drivers probed with this vdev will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ when the vdev is released.
	 */
	get_device(&rproc->dev);

	/* Reference the vdev and vring allocations */
	kref_get(&rvdev->refcount);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}
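
/*
 * Illustrative sketch (editor's addition, not part of this driver): how a
 * platform-specific remoteproc driver typically wires its notification path
 * into the hooks used above.  rproc_virtio_notify() lands in the driver's
 * .kick op, and incoming kicks from the remote side are forwarded to
 * rproc_vq_interrupt(), which looks up the vring by notifyid and runs
 * vring_interrupt() on it.  The my_rproc_* names and the use of the mailbox
 * framework are assumptions for the example only; real drivers such as
 * st_remoteproc.c follow the same pattern.  The block is deliberately
 * compiled out.
 */
#if 0
#include <linux/mailbox_client.h>

struct my_rproc {
	struct rproc *rproc;
	struct mbox_client client;
	struct mbox_chan *chan;
};

/* Remote processor kicked us: the mailbox payload carries the notifyid */
static void my_rproc_mbox_rx(struct mbox_client *cl, void *data)
{
	struct my_rproc *priv = container_of(cl, struct my_rproc, client);
	u32 notifyid = *(u32 *)data;

	/* Hand the notification to the remoteproc core */
	if (rproc_vq_interrupt(priv->rproc, notifyid) == IRQ_NONE)
		dev_dbg(&priv->rproc->dev, "no message in vq %u\n", notifyid);
}

/* .kick op: tell the remote processor which virtqueue has new buffers */
static void my_rproc_kick(struct rproc *rproc, int vqid)
{
	struct my_rproc *priv = rproc->priv;

	mbox_send_message(priv->chan, &vqid);
}
#endif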