// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022, STMicroelectronics
 * Copyright (c) 2016, Linaro Ltd.
 * Copyright (c) 2012, Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2012, PetaLogix
 * Copyright (c) 2011, Texas Instruments, Inc.
 * Copyright (c) 2011, Google, Inc.
 *
 * Based on rpmsg performance statistics driver by Michal Simek, which in turn
 * was based on TI & Google OMX rpmsg driver.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/rpmsg.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/rpmsg.h>

#include "rpmsg_char.h"
#include "rpmsg_internal.h"

#define RPMSG_DEV_MAX	(MINORMASK + 1)

static dev_t rpmsg_major;

static DEFINE_IDA(rpmsg_ept_ida);
static DEFINE_IDA(rpmsg_minor_ida);

#define dev_to_eptdev(dev) container_of(dev, struct rpmsg_eptdev, dev)
#define cdev_to_eptdev(i_cdev) container_of(i_cdev, struct rpmsg_eptdev, cdev)

/**
 * struct rpmsg_eptdev - endpoint device context
 * @dev:	endpoint device
 * @cdev:	cdev for the endpoint device
 * @rpdev:	underlying rpmsg device
 * @chinfo:	info used to open the endpoint
 * @ept_lock:	synchronization of @ept modifications
 * @ept:	rpmsg endpoint reference, when open
 * @queue_lock:	synchronization of @queue operations
 * @queue:	incoming message queue
 * @readq:	wait object for incoming queue
 * @default_ept: set to the channel's default endpoint if that endpoint should
 *		be reused on device open, to prevent the endpoint address from
 *		being updated.
 */
struct rpmsg_eptdev {
	struct device dev;
	struct cdev cdev;

	struct rpmsg_device *rpdev;
	struct rpmsg_channel_info chinfo;

	struct mutex ept_lock;
	struct rpmsg_endpoint *ept;
	struct rpmsg_endpoint *default_ept;

	spinlock_t queue_lock;
	struct sk_buff_head queue;
	wait_queue_head_t readq;

};

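/*
 * Tear down an endpoint device: destroy the rpmsg endpoint if it is still
 * open, wake up any blocked readers and remove the cdev and the device.
 * Called for each child device on channel removal and from the destroy ioctl.
 */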
int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* wake up any blocked readers */
	wake_up_interruptible(&eptdev->readq);

	cdev_device_del(&eptdev->cdev, &eptdev->dev);
	put_device(&eptdev->dev);

	return 0;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_destroy);

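/*
 * Endpoint receive callback: copy the incoming payload into an skb, queue it
 * on the endpoint device and wake up any process sleeping in read().
 */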
static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
			void *priv, u32 addr)
{
	struct rpmsg_eptdev *eptdev = priv;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, buf, len);

	spin_lock(&eptdev->queue_lock);
	skb_queue_tail(&eptdev->queue, skb);
	spin_unlock(&eptdev->queue_lock);

	/* wake up any blocked processes waiting for new data */
	wake_up_interruptible(&eptdev->readq);

	return 0;
}

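/*
 * Open the endpoint device: either reuse the channel's default endpoint (when
 * default_ept was set at probe time) or create a dedicated endpoint that will
 * live until release. Only one opener at a time is allowed.
 */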
static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct rpmsg_endpoint *ept;
	struct rpmsg_device *rpdev = eptdev->rpdev;
	struct device *dev = &eptdev->dev;

	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		mutex_unlock(&eptdev->ept_lock);
		return -EBUSY;
	}

	get_device(dev);

	/*
	 * If the default_ept is set, the rpmsg device default endpoint is used.
	 * Else a new endpoint is created on open that will be destroyed on release.
	 */
	if (eptdev->default_ept)
		ept = eptdev->default_ept;
	else
		ept = rpmsg_create_ept(rpdev, rpmsg_ept_cb, eptdev, eptdev->chinfo);

	if (!ept) {
		dev_err(dev, "failed to open %s\n", eptdev->chinfo.name);
		put_device(dev);
		mutex_unlock(&eptdev->ept_lock);
		return -EINVAL;
	}

	eptdev->ept = ept;
	filp->private_data = eptdev;
	mutex_unlock(&eptdev->ept_lock);

	return 0;
}

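/*
 * Release the endpoint device: destroy the endpoint created at open time
 * (default endpoints are left to the rpmsg core), drop any queued messages
 * and release the reference taken in open.
 */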
static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
{
	struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
	struct device *dev = &eptdev->dev;

	/* Close the endpoint, if it hasn't already been destroyed by the parent */
	mutex_lock(&eptdev->ept_lock);
	if (eptdev->ept) {
		if (!eptdev->default_ept)
			rpmsg_destroy_ept(eptdev->ept);
		eptdev->ept = NULL;
	}
	mutex_unlock(&eptdev->ept_lock);

	/* Discard all SKBs */
	skb_queue_purge(&eptdev->queue);

	put_device(dev);

	return 0;
}

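/*
 * Read one queued message. Blocks until data arrives or the endpoint goes
 * away, unless the file was opened with O_NONBLOCK. A message larger than the
 * user buffer is truncated and the remainder of that message is dropped.
 */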
static ssize_t rpmsg_eptdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	unsigned long flags;
	struct sk_buff *skb;
	int use;

	if (!eptdev->ept)
		return -EPIPE;

	spin_lock_irqsave(&eptdev->queue_lock, flags);

	/* Wait for data in the queue */
	if (skb_queue_empty(&eptdev->queue)) {
		spin_unlock_irqrestore(&eptdev->queue_lock, flags);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* Wait until we get data or the endpoint goes away */
		if (wait_event_interruptible(eptdev->readq,
					     !skb_queue_empty(&eptdev->queue) ||
					     !eptdev->ept))
			return -ERESTARTSYS;

		/* We lost the endpoint while waiting */
		if (!eptdev->ept)
			return -EPIPE;

		spin_lock_irqsave(&eptdev->queue_lock, flags);
	}

	skb = skb_dequeue(&eptdev->queue);
	spin_unlock_irqrestore(&eptdev->queue_lock, flags);
	if (!skb)
		return -EFAULT;

	use = min_t(size_t, iov_iter_count(to), skb->len);
	if (copy_to_iter(skb->data, use, to) != use)
		use = -EFAULT;

	kfree_skb(skb);

	return use;
}

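/*
 * Send a message to the remote processor: copy the user data into a kernel
 * bounce buffer and hand it to the rpmsg core, using the non-blocking
 * rpmsg_trysendto() variant when the file was opened with O_NONBLOCK.
 */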
static ssize_t rpmsg_eptdev_write_iter(struct kiocb *iocb,
				       struct iov_iter *from)
{
	struct file *filp = iocb->ki_filp;
	struct rpmsg_eptdev *eptdev = filp->private_data;
	size_t len = iov_iter_count(from);
	void *kbuf;
	int ret;

	kbuf = kzalloc(len, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (!copy_from_iter_full(kbuf, len, from)) {
		ret = -EFAULT;
		goto free_kbuf;
	}

	if (mutex_lock_interruptible(&eptdev->ept_lock)) {
		ret = -ERESTARTSYS;
		goto free_kbuf;
	}

	if (!eptdev->ept) {
		ret = -EPIPE;
		goto unlock_eptdev;
	}

	if (filp->f_flags & O_NONBLOCK) {
		ret = rpmsg_trysendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
		if (ret == -ENOMEM)
			ret = -EAGAIN;
	} else {
		ret = rpmsg_sendto(eptdev->ept, kbuf, len, eptdev->chinfo.dst);
	}

unlock_eptdev:
	mutex_unlock(&eptdev->ept_lock);

free_kbuf:
	kfree(kbuf);
	return ret < 0 ? ret : len;
}

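/*
 * Report EPOLLIN when a message is queued for reading; writability is
 * delegated to rpmsg_poll() so the underlying transport can signal TX space.
 */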
static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
{
	struct rpmsg_eptdev *eptdev = filp->private_data;
	__poll_t mask = 0;

	if (!eptdev->ept)
		return EPOLLERR;

	poll_wait(filp, &eptdev->readq, wait);

	if (!skb_queue_empty(&eptdev->queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	mask |= rpmsg_poll(eptdev->ept, filp, wait);

	return mask;
}

static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
			       unsigned long arg)
{
	struct rpmsg_eptdev *eptdev = fp->private_data;

	if (cmd != RPMSG_DESTROY_EPT_IOCTL)
		return -EINVAL;

	/* Don't allow destroying a default endpoint. */
	if (eptdev->default_ept)
		return -EINVAL;

	return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
}

static const struct file_operations rpmsg_eptdev_fops = {
	.owner = THIS_MODULE,
	.open = rpmsg_eptdev_open,
	.release = rpmsg_eptdev_release,
	.read_iter = rpmsg_eptdev_read_iter,
	.write_iter = rpmsg_eptdev_write_iter,
	.poll = rpmsg_eptdev_poll,
	.unlocked_ioctl = rpmsg_eptdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
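
/*
 * Illustrative userspace usage (a sketch only, not part of this driver),
 * assuming udev has created /dev/rpmsg0 for an exposed endpoint:
 *
 *	int fd = open("/dev/rpmsg0", O_RDWR);
 *
 *	write(fd, "ping", 4);			// rpmsg_eptdev_write_iter()
 *	n = read(fd, buf, sizeof(buf));		// blocks in rpmsg_eptdev_read_iter()
 *						// until rpmsg_ept_cb() queues data
 *	ioctl(fd, RPMSG_DESTROY_EPT_IOCTL);	// refused (-EINVAL) on a default endpoint
 *	close(fd);
 */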

static ssize_t name_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", eptdev->chinfo.name);
}
static DEVICE_ATTR_RO(name);

static ssize_t src_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.src);
}
static DEVICE_ATTR_RO(src);

static ssize_t dst_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct rpmsg_eptdev *eptdev = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", eptdev->chinfo.dst);
}
static DEVICE_ATTR_RO(dst);

static struct attribute *rpmsg_eptdev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_src.attr,
	&dev_attr_dst.attr,
	NULL
};
ATTRIBUTE_GROUPS(rpmsg_eptdev);

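/*
 * Device release callback: return the IDA identifiers and free the eptdev
 * once the last reference on the struct device is dropped.
 */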
static void rpmsg_eptdev_release_device(struct device *dev)
{
	struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);

	ida_simple_remove(&rpmsg_ept_ida, dev->id);
	ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
	kfree(eptdev);
}

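/*
 * Allocate and initialize an endpoint device (locks, message queue, wait
 * queue, cdev); the caller still has to register it with
 * rpmsg_chrdev_eptdev_add().
 */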
static struct rpmsg_eptdev *rpmsg_chrdev_eptdev_alloc(struct rpmsg_device *rpdev,
						      struct device *parent)
{
	struct rpmsg_eptdev *eptdev;
	struct device *dev;

	eptdev = kzalloc(sizeof(*eptdev), GFP_KERNEL);
	if (!eptdev)
		return ERR_PTR(-ENOMEM);

	dev = &eptdev->dev;
	eptdev->rpdev = rpdev;

	mutex_init(&eptdev->ept_lock);
	spin_lock_init(&eptdev->queue_lock);
	skb_queue_head_init(&eptdev->queue);
	init_waitqueue_head(&eptdev->readq);

	device_initialize(dev);
	dev->class = rpmsg_class;
	dev->parent = parent;
	dev->groups = rpmsg_eptdev_groups;
	dev_set_drvdata(dev, eptdev);

	cdev_init(&eptdev->cdev, &rpmsg_eptdev_fops);
	eptdev->cdev.owner = THIS_MODULE;

	return eptdev;
}

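/*
 * Register an allocated endpoint device: reserve a minor number and a device
 * id, name it rpmsgN and add the cdev and the device to the system. On
 * failure everything allocated so far is released.
 */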
static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_channel_info chinfo)
{
	struct device *dev = &eptdev->dev;
	int ret;

	eptdev->chinfo = chinfo;

	ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
	if (ret < 0)
		goto free_eptdev;
	dev->devt = MKDEV(MAJOR(rpmsg_major), ret);

	ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
	if (ret < 0)
		goto free_minor_ida;
	dev->id = ret;
	dev_set_name(dev, "rpmsg%d", ret);

	ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
	if (ret)
		goto free_ept_ida;

	/* We can now rely on the release function for cleanup */
	dev->release = rpmsg_eptdev_release_device;

	return ret;

free_ept_ida:
	ida_simple_remove(&rpmsg_ept_ida, dev->id);
free_minor_ida:
	ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
	put_device(dev);
	kfree(eptdev);

	return ret;
}

int rpmsg_chrdev_eptdev_create(struct rpmsg_device *rpdev, struct device *parent,
			       struct rpmsg_channel_info chinfo)
{
	struct rpmsg_eptdev *eptdev;
	int ret;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, parent);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	ret = rpmsg_chrdev_eptdev_add(eptdev, chinfo);

	return ret;
}
EXPORT_SYMBOL(rpmsg_chrdev_eptdev_create);

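/*
 * Probe is called when an "rpmsg-raw" channel is announced: expose the
 * channel's default endpoint to userspace as a /dev/rpmsgN character device.
 */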
static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
{
	struct rpmsg_channel_info chinfo;
	struct rpmsg_eptdev *eptdev;
	struct device *dev = &rpdev->dev;

	memcpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
	chinfo.src = rpdev->src;
	chinfo.dst = rpdev->dst;

	eptdev = rpmsg_chrdev_eptdev_alloc(rpdev, dev);
	if (IS_ERR(eptdev))
		return PTR_ERR(eptdev);

	/* Set the default_ept to the rpmsg device endpoint */
	eptdev->default_ept = rpdev->ept;

	/*
	 * rpmsg_ept_cb() uses the priv parameter to retrieve its rpmsg_eptdev
	 * context. Store it in the default endpoint's priv field.
	 */
	eptdev->default_ept->priv = eptdev;

	return rpmsg_chrdev_eptdev_add(eptdev, chinfo);
}

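/*
 * Remove is called when the channel goes away: destroy every child endpoint
 * device that was created for it.
 */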
static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
{
	int ret;

	ret = device_for_each_child(&rpdev->dev, NULL, rpmsg_chrdev_eptdev_destroy);
	if (ret)
		dev_warn(&rpdev->dev, "failed to destroy endpoints: %d\n", ret);
}

static struct rpmsg_device_id rpmsg_chrdev_id_table[] = {
	{ .name	= "rpmsg-raw" },
	{ },
};

static struct rpmsg_driver rpmsg_chrdev_driver = {
	.probe = rpmsg_chrdev_probe,
	.remove = rpmsg_chrdev_remove,
	.callback = rpmsg_ept_cb,
	.id_table = rpmsg_chrdev_id_table,
	.drv.name = "rpmsg_chrdev",
};

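/*
 * Module init: reserve the character device region shared by all rpmsg
 * endpoint devices, then register the "rpmsg-raw" channel driver.
 */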
static int rpmsg_chrdev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg_char");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		return ret;
	}

	ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
	if (ret < 0) {
		pr_err("rpmsg: failed to register rpmsg raw driver\n");
		goto free_region;
	}

	return 0;

free_region:
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);

	return ret;
}
postcore_initcall(rpmsg_chrdev_init);

static void rpmsg_chrdev_exit(void)
{
	unregister_rpmsg_driver(&rpmsg_chrdev_driver);
	unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
module_exit(rpmsg_chrdev_exit);

MODULE_ALIAS("rpmsg:rpmsg_chrdev");
MODULE_LICENSE("GPL v2");