// SPDX-License-Identifier: GPL-2.0-only
/*
 * Remote processor messaging transport (OMAP platform-specific bits)
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"

static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
{
	return container_of(vdev->dev.parent, struct rproc_vdev, dev);
}

static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);

	return rvdev->rproc;
}

/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
	struct rproc_vring *rvring = vq->priv;
	struct rproc *rproc = rvring->rvdev->rproc;
	int notifyid = rvring->notifyid;

	dev_dbg(&rproc->dev, "kicking vq index: %d\n", notifyid);

	rproc->ops->kick(rproc, notifyid);
	return true;
}
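
/*
 * Illustrative sketch (not part of this file): the ->kick() op invoked
 * above is supplied by the platform-specific rproc driver and typically
 * just forwards the notifyid to the remote side, for example over a
 * mailbox channel. The names my_rproc_kick, struct my_rproc and its
 * mbox member below are hypothetical.
 *
 *	static void my_rproc_kick(struct rproc *rproc, int vqid)
 *	{
 *		struct my_rproc *priv = rproc->priv;
 *
 *		mbox_send_message(priv->mbox, (void *)(unsigned long)vqid);
 *	}
 */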

/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * IRQ_HANDLED otherwise.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	return vring_interrupt(0, rvring->vq);
}
EXPORT_SYMBOL(rproc_vq_interrupt);
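
/*
 * Illustrative sketch (not part of this file): a platform rproc driver
 * typically calls rproc_vq_interrupt() from its mailbox receive callback
 * (or interrupt handler), passing along the notifyid it received from the
 * remote processor. The names my_rproc_mbox_rx and struct my_rproc below
 * are hypothetical.
 *
 *	static void my_rproc_mbox_rx(struct mbox_client *cl, void *data)
 *	{
 *		struct my_rproc *priv = container_of(cl, struct my_rproc, cl);
 *		u32 notifyid = (u32)(unsigned long)data;
 *
 *		if (rproc_vq_interrupt(priv->rproc, notifyid) == IRQ_NONE)
 *			dev_dbg(&priv->rproc->dev, "no message in vq %d\n",
 *				notifyid);
 *	}
 */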

static struct virtqueue *rp_find_vq(struct virtio_device *vdev,
				    unsigned int id,
				    void (*callback)(struct virtqueue *vq),
				    const char *name, bool ctx)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);
	struct device *dev = &rproc->dev;
	struct rproc_mem_entry *mem;
	struct rproc_vring *rvring;
	struct fw_rsc_vdev *rsc;
	struct virtqueue *vq;
	void *addr;
	int num, size;

	/* we're temporarily limited to two virtqueues per rvdev */
	if (id >= ARRAY_SIZE(rvdev->vring))
		return ERR_PTR(-EINVAL);

	if (!name)
		return NULL;

	/* Search allocated memory region by name */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
					  id);
	if (!mem || !mem->va)
		return ERR_PTR(-ENOMEM);

	rvring = &rvdev->vring[id];
	addr = mem->va;
	num = rvring->num;

	/* zero vring */
	size = vring_size(num, rvring->align);
	memset(addr, 0, size);

	dev_dbg(dev, "vring%d: va %pK qsz %d notifyid %d\n",
		id, addr, num, rvring->notifyid);

	/*
	 * Create the new vq, and tell virtio we're not interested in
	 * the 'weak' smp barriers, since we're talking with a real device.
	 */
	vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
				 addr, rproc_virtio_notify, callback, name);
	if (!vq) {
		dev_err(dev, "vring_new_virtqueue %s failed\n", name);
		rproc_free_vring(rvring);
		return ERR_PTR(-ENOMEM);
	}

	vq->num_max = num;

	rvring->vq = vq;
	vq->priv = rvring;

	/* Update vring in resource table */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[id].da = mem->da;

	return vq;
}

static void __rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct rproc_vring *rvring;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		rvring = vq->priv;
		rvring->vq = NULL;
		vring_del_virtqueue(vq);
	}
}

static void rproc_virtio_del_vqs(struct virtio_device *vdev)
{
	__rproc_virtio_del_vqs(vdev);
}

static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
				 struct virtqueue *vqs[],
				 vq_callback_t *callbacks[],
				 const char * const names[],
				 u32 sizes[],
				 const bool * ctx,
				 struct irq_affinity *desc)
{
	int i, ret, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = rp_find_vq(vdev, queue_idx++, callbacks[i], names[i],
				    ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			goto error;
		}
	}

	return 0;

error:
	__rproc_virtio_del_vqs(vdev);
	return ret;
}
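
/*
 * Illustrative sketch (not part of this file): rproc_virtio_find_vqs() is
 * not called directly; a virtio driver bound to this device (for example
 * virtio_rpmsg_bus) requests its virtqueues through the generic helper,
 * which routes here via the config ops. The callback names below are
 * hypothetical placeholders for the driver's own handlers.
 *
 *	vq_callback_t *cbs[] = { rx_done_cb, tx_done_cb };
 *	static const char * const names[] = { "input", "output" };
 *	struct virtqueue *vqs[2];
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, cbs, names, NULL);
 */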

static u8 rproc_virtio_get_status(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->status;
}

static void rproc_virtio_set_status(struct virtio_device *vdev, u8 status)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = status;
	dev_dbg(&vdev->dev, "status: %d\n", status);
}

static void rproc_virtio_reset(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	rsc->status = 0;
	dev_dbg(&vdev->dev, "reset !\n");
}

/* provide the vdev features as retrieved from the firmware */
static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	return rsc->dfeatures;
}

static void rproc_transport_features(struct virtio_device *vdev)
{
	/*
	 * Packed ring isn't enabled on remoteproc for now,
	 * because remoteproc uses vring_new_virtqueue() which
	 * creates virtio rings on preallocated memory.
	 */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}

static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;

	/* Give virtio_ring a chance to accept features */
	vring_transport_features(vdev);

	/* Give virtio_rproc a chance to accept features. */
	rproc_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	/*
	 * Remember the finalized features of our vdev, and provide it
	 * to the remote processor once it is powered on.
	 */
	rsc->gfeatures = vdev->features;

	return 0;
}

static void rproc_virtio_get(struct virtio_device *vdev, unsigned int offset,
			     void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_get: access out of bounds\n");
		return;
	}

	memcpy(buf, cfg + offset, len);
}

static void rproc_virtio_set(struct virtio_device *vdev, unsigned int offset,
			     const void *buf, unsigned int len)
{
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct fw_rsc_vdev *rsc;
	void *cfg;

	rsc = (void *)rvdev->rproc->table_ptr + rvdev->rsc_offset;
	cfg = &rsc->vring[rsc->num_of_vrings];

	if (offset + len > rsc->config_len || offset + len < len) {
		dev_err(&vdev->dev, "rproc_virtio_set: access out of bounds\n");
		return;
	}

	memcpy(cfg + offset, buf, len);
}

static const struct virtio_config_ops rproc_virtio_config_ops = {
	.get_features	= rproc_virtio_get_features,
	.finalize_features = rproc_virtio_finalize_features,
	.find_vqs	= rproc_virtio_find_vqs,
	.del_vqs	= rproc_virtio_del_vqs,
	.reset		= rproc_virtio_reset,
	.set_status	= rproc_virtio_set_status,
	.get_status	= rproc_virtio_get_status,
	.get		= rproc_virtio_get,
	.set		= rproc_virtio_set,
};
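
/*
 * Illustrative sketch (not part of this file): a driver bound to this
 * virtio device reads the device-specific config space (the bytes that
 * follow the vring descriptors in the fw_rsc_vdev resource) through the
 * regular virtio config helpers, which land in rproc_virtio_get()/set()
 * above, e.g.:
 *
 *	u8 cfg[4];
 *
 *	virtio_cread_bytes(vdev, 0, cfg, sizeof(cfg));
 */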

/*
 * This function is called whenever vdev is released, and is responsible
 * for decrementing the remote processor's refcount which was taken when
 * the vdev was added.
 *
 * Never call this function directly; it will be called by the driver
 * core when needed.
 */
static void rproc_virtio_dev_release(struct device *dev)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
	struct rproc *rproc = vdev_to_rproc(vdev);

	kfree(vdev);

	kref_put(&rvdev->refcount, rproc_vdev_release);

	put_device(&rproc->dev);
}

/**
 * rproc_add_virtio_dev() - register an rproc-induced virtio device
 * @rvdev: the remote vdev
 * @id: the device type identification (used to match it with a driver).
 *
 * This function registers a virtio device. This vdev's parent is
 * the rproc device.
 *
 * Return: 0 on success or an appropriate error value otherwise
 */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rvdev->dev;
	struct virtio_device *vdev;
	struct rproc_mem_entry *mem;
	int ret;

	if (rproc->ops->kick == NULL) {
		ret = -EINVAL;
		dev_err(dev, ".kick method not defined for %s\n", rproc->name);
		goto out;
	}

	/* Try to find dedicated vdev buffer carveout */
	mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
	if (mem) {
		phys_addr_t pa;

		if (mem->of_resm_idx != -1) {
			struct device_node *np = rproc->dev.parent->of_node;

			/* Associate reserved memory to vdev device */
			ret = of_reserved_mem_device_init_by_idx(dev, np,
								 mem->of_resm_idx);
			if (ret) {
				dev_err(dev, "Can't associate reserved memory\n");
				goto out;
			}
		} else {
			if (mem->va) {
				dev_warn(dev, "vdev %d buffer already mapped\n",
					 rvdev->index);
				pa = rproc_va_to_pa(mem->va);
			} else {
				/* Use dma address as the carveout is not memory-mapped yet */
				pa = (phys_addr_t)mem->dma;
			}

			/* Associate vdev buffer memory pool to vdev subdev */
			ret = dma_declare_coherent_memory(dev, pa,
							   mem->da,
							   mem->len);
			if (ret < 0) {
				dev_err(dev, "Failed to associate buffer\n");
				goto out;
			}
		}
	} else {
		struct device_node *np = rproc->dev.parent->of_node;

		/*
		 * If we don't have a dedicated buffer, just attempt to re-assign
		 * the reserved memory from our parent. A default memory-region
		 * at index 0 from the parent's memory-regions is assigned for
		 * the rvdev dev to allocate from. Failure is non-critical and
		 * the allocations will fall back to global pools, so don't
		 * check the return value either.
		 */
		of_reserved_mem_device_init_by_idx(dev, np, 0);
	}

	/* Allocate virtio device */
	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->id.device	= id;
	vdev->config = &rproc_virtio_config_ops;
	vdev->dev.parent = dev;
	vdev->dev.release = rproc_virtio_dev_release;

	/*
	 * We're indirectly making a non-temporary copy of the rproc pointer
	 * here, because drivers probed with this vdev will indirectly
	 * access the wrapping rproc.
	 *
	 * Therefore we must increment the rproc refcount here, and decrement
	 * it _only_ when the vdev is released.
	 */
	get_device(&rproc->dev);

	/* Reference the vdev and vring allocations */
	kref_get(&rvdev->refcount);

	ret = register_virtio_device(vdev);
	if (ret) {
		put_device(&vdev->dev);
		dev_err(dev, "failed to register vdev: %d\n", ret);
		goto out;
	}

	dev_info(dev, "registered %s (type %d)\n", dev_name(&vdev->dev), id);

out:
	return ret;
}
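
/*
 * Illustrative sketch (not part of this file): the remoteproc core
 * typically calls rproc_add_virtio_dev() from the vdev subdevice's start
 * hook once the resource table has been parsed, roughly:
 *
 *	static int rproc_vdev_do_start(struct rproc_subdev *subdev)
 *	{
 *		struct rproc_vdev *rvdev = container_of(subdev, struct rproc_vdev,
 *							subdev);
 *
 *		return rproc_add_virtio_dev(rvdev, rvdev->id);
 *	}
 */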

/**
 * rproc_remove_virtio_dev() - remove an rproc-induced virtio device
 * @dev: the virtio device
 * @data: must be null
 *
 * This function unregisters an existing virtio device.
 *
 * Return: 0
 */
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
	struct virtio_device *vdev = dev_to_virtio(dev);

	unregister_virtio_device(vdev);
	return 0;
}
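
/*
 * Illustrative sketch (not part of this file): since this is a
 * device_for_each_child() callback, the remoteproc core typically tears
 * down the registered virtio device(s) with something like:
 *
 *	device_for_each_child(&rvdev->dev, NULL, rproc_remove_virtio_dev);
 */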