// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wwan.h>

#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */

static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */
static struct class *wwan_class;
static int wwan_major;

#define to_wwan_dev(d) container_of(d, struct wwan_device, dev)
#define to_wwan_port(d) container_of(d, struct wwan_port, dev)

/* WWAN port flags */
#define WWAN_PORT_TX_OFF	0

/**
 * struct wwan_device - The structure that defines a WWAN device
 *
 * @id: WWAN device unique ID.
 * @dev: Underlying device.
 * @port_id: Current available port ID to pick.
 */
struct wwan_device {
	unsigned int id;
	struct device dev;
	atomic_t port_id;
};

/**
 * struct wwan_port - The structure that defines a WWAN port
 * @type: Port type
 * @start_count: Port start counter
 * @flags: Store port state and capabilities
 * @ops: Pointer to WWAN port operations
 * @ops_lock: Protect port ops
 * @dev: Underlying device
 * @rxq: Buffer inbound queue
 * @waitqueue: The waitqueue for port fops (read/write/poll)
 */
struct wwan_port {
	enum wwan_port_type type;
	unsigned int start_count;
	unsigned long flags;
	const struct wwan_port_ops *ops;
	struct mutex ops_lock; /* Serialize ops + protect against removal */
	struct device dev;
	struct sk_buff_head rxq;
	wait_queue_head_t waitqueue;
};

static void wwan_dev_destroy(struct device *dev)
{
	struct wwan_device *wwandev = to_wwan_dev(dev);

	ida_free(&wwan_dev_ids, wwandev->id);
	kfree(wwandev);
}

static const struct device_type wwan_dev_type = {
	.name    = "wwan_dev",
	.release = wwan_dev_destroy,
};

static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
	return (dev->type == &wwan_dev_type && dev->parent == parent);
}

static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

/* This function allocates and registers a new WWAN device OR, if a WWAN
 * device already exists for the given parent, it gets a reference and returns
 * it. This function is not exported (for now); it is called indirectly via
 * wwan_create_port().
 */
static struct wwan_device *wwan_create_dev(struct device *parent)
{
	struct wwan_device *wwandev;
	int err, id;

	/* The 'find-alloc-register' operation must be protected against
	 * concurrent execution, as a WWAN device may be shared between
	 * multiple callers or concurrently unregistered from wwan_remove_dev().
	 */
	mutex_lock(&wwan_register_lock);

	/* If wwandev already exists, return it */
	wwandev = wwan_dev_get_by_parent(parent);
	if (!IS_ERR(wwandev))
		goto done_unlock;

	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
	if (id < 0) {
		wwandev = ERR_PTR(id);
		goto done_unlock;
	}

	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
	if (!wwandev) {
		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

	wwandev->dev.parent = parent;
	wwandev->dev.class = wwan_class;
	wwandev->dev.type = &wwan_dev_type;
	wwandev->id = id;
	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);

	err = device_register(&wwandev->dev);
	if (err) {
		put_device(&wwandev->dev);
		wwandev = ERR_PTR(err);
	}

done_unlock:
	mutex_unlock(&wwan_register_lock);

	return wwandev;
}

static int is_wwan_child(struct device *dev, void *data)
{
	return dev->class == wwan_class;
}

static void wwan_remove_dev(struct wwan_device *wwandev)
{
	int ret;

	/* Prevent concurrent picking from wwan_create_dev */
	mutex_lock(&wwan_register_lock);

	/* The WWAN device is created and registered (get+add) along with its
	 * first child port, and subsequent port registrations only grab a
	 * reference (get). The WWAN device must then be unregistered (del+put)
	 * along with its last port; otherwise the reference is simply
	 * dropped (put).
	 */
	ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
	if (!ret)
		device_unregister(&wwandev->dev);
	else
		put_device(&wwandev->dev);

	mutex_unlock(&wwan_register_lock);
}

/* ------- WWAN port management ------- */

/* Keep aligned with wwan_port_type enum */
static const char * const wwan_port_type_str[] = {
	"AT",
	"MBIM",
	"QMI",
	"QCDM",
	"FIREHOSE"
};

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct wwan_port *port = to_wwan_port(dev);

	return sprintf(buf, "%s\n", wwan_port_type_str[port->type]);
}
static DEVICE_ATTR_RO(type);

static struct attribute *wwan_port_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};
ATTRIBUTE_GROUPS(wwan_port);

static void wwan_port_destroy(struct device *dev)
{
	struct wwan_port *port = to_wwan_port(dev);

	ida_free(&minors, MINOR(port->dev.devt));
	skb_queue_purge(&port->rxq);
	mutex_destroy(&port->ops_lock);
	kfree(port);
}

static const struct device_type wwan_port_dev_type = {
	.name = "wwan_port",
	.release = wwan_port_destroy,
	.groups = wwan_port_groups,
};

static int wwan_port_minor_match(struct device *dev, const void *minor)
{
	return (dev->type == &wwan_port_dev_type &&
		MINOR(dev->devt) == *(unsigned int *)minor);
}

static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_port(dev);
}

struct wwan_port *wwan_create_port(struct device *parent,
				   enum wwan_port_type type,
				   const struct wwan_port_ops *ops,
				   void *drvdata)
{
	struct wwan_device *wwandev;
	struct wwan_port *port;
	int minor, err = -ENOMEM;

	if (type >= WWAN_PORT_MAX || !ops)
		return ERR_PTR(-EINVAL);

	/* A port is always a child of a WWAN device, retrieve (allocate or
	 * pick) the WWAN device based on the provided parent device.
	 */
	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* A port is exposed as a character device, get a minor */
	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
	if (minor < 0)
		goto error_wwandev_remove;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		ida_free(&minors, minor);
		goto error_wwandev_remove;
	}

	port->type = type;
	port->ops = ops;
	mutex_init(&port->ops_lock);
	skb_queue_head_init(&port->rxq);
	init_waitqueue_head(&port->waitqueue);

	port->dev.parent = &wwandev->dev;
	port->dev.class = wwan_class;
	port->dev.type = &wwan_port_dev_type;
	port->dev.devt = MKDEV(wwan_major, minor);
	dev_set_drvdata(&port->dev, drvdata);

	/* create unique name based on wwan device id, port index and type */
	dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
		     atomic_inc_return(&wwandev->port_id),
		     wwan_port_type_str[port->type]);

	err = device_register(&port->dev);
	if (err)
		goto error_put_device;

	return port;

error_put_device:
	put_device(&port->dev);
error_wwandev_remove:
	wwan_remove_dev(wwandev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);

void wwan_remove_port(struct wwan_port *port)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);

	mutex_lock(&port->ops_lock);
	if (port->start_count)
		port->ops->stop(port);
	port->ops = NULL; /* Prevent any new port operations (e.g. from fops) */
	mutex_unlock(&port->ops_lock);

	wake_up_interruptible(&port->waitqueue);

	skb_queue_purge(&port->rxq);
	dev_set_drvdata(&port->dev, NULL);
	device_unregister(&port->dev);

	/* Release related wwan device */
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);
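
/* Usage sketch (illustrative only, not part of this file): a hypothetical
 * modem driver could expose an AT port by providing wwan_port_ops callbacks
 * and pairing wwan_create_port() in its probe path with wwan_remove_port()
 * in its remove path. All my_* names below are assumptions made for
 * illustration.
 *
 *	static int my_port_start(struct wwan_port *port)
 *	{
 *		struct my_modem *modem = wwan_port_get_drvdata(port);
 *
 *		return my_modem_open_channel(modem);
 *	}
 *
 *	static void my_port_stop(struct wwan_port *port)
 *	{
 *		my_modem_close_channel(wwan_port_get_drvdata(port));
 *	}
 *
 *	static int my_port_tx(struct wwan_port *port, struct sk_buff *skb)
 *	{
 *		return my_modem_send(wwan_port_get_drvdata(port), skb);
 *	}
 *
 *	static const struct wwan_port_ops my_port_ops = {
 *		.start = my_port_start,
 *		.stop = my_port_stop,
 *		.tx = my_port_tx,
 *	};
 *
 * In the driver probe path:
 *
 *	modem->at_port = wwan_create_port(parent_dev, WWAN_PORT_AT,
 *					  &my_port_ops, modem);
 *	if (IS_ERR(modem->at_port))
 *		return PTR_ERR(modem->at_port);
 *
 * In the driver remove path (this also releases the parent wwan device once
 * its last port is gone):
 *
 *	wwan_remove_port(modem->at_port);
 */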

void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
	skb_queue_tail(&port->rxq, skb);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);
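
/* Illustrative sketch (an assumption, not taken from this file): a driver's
 * receive-completion handler would typically wrap the received payload in an
 * skb and hand it to wwan_port_rx(), which queues it on port->rxq and wakes
 * any blocked reader or poller. The my_* names are hypothetical.
 *
 *	static void my_modem_rx_complete(struct my_modem *modem,
 *					 const void *data, size_t len)
 *	{
 *		struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);
 *
 *		if (!skb)
 *			return;	// drop on allocation failure
 *
 *		skb_put_data(skb, data, len);
 *		wwan_port_rx(modem->at_port, skb);
 *	}
 */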

void wwan_port_txon(struct wwan_port *port)
{
	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);

void wwan_port_txoff(struct wwan_port *port)
{
	set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);
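
/* Illustrative sketch (an assumption): these two helpers give drivers simple
 * TX flow control. A driver would call wwan_port_txoff() when its hardware
 * TX path is congested, blocking wwan_port_fops_write() callers, and
 * wwan_port_txon() once room is available again, waking blocked writers and
 * pollers. The my_* helper is hypothetical.
 *
 *	if (my_modem_tx_queue_full(modem))
 *		wwan_port_txoff(modem->at_port);
 *
 *	// ...later, from the TX-done completion path:
 *	wwan_port_txon(modem->at_port);
 */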

void *wwan_port_get_drvdata(struct wwan_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);

static int wwan_port_op_start(struct wwan_port *port)
{
	int ret = 0;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* If port is already started, don't start again */
	if (!port->start_count)
		ret = port->ops->start(port);

	if (!ret)
		port->start_count++;

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static void wwan_port_op_stop(struct wwan_port *port)
{
	mutex_lock(&port->ops_lock);
	port->start_count--;
	if (port->ops && !port->start_count)
		port->ops->stop(port);
	mutex_unlock(&port->ops_lock);
}

static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = port->ops->tx(port, skb);

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

static bool is_read_blocked(struct wwan_port *port)
{
	return skb_queue_empty(&port->rxq) && port->ops;
}

static bool is_write_blocked(struct wwan_port *port)
{
	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}

static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
	if (!is_read_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
	if (!is_write_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
	struct wwan_port *port;
	int err = 0;

	port = wwan_port_get_by_minor(iminor(inode));
	if (IS_ERR(port))
		return PTR_ERR(port);

	file->private_data = port;
	stream_open(inode, file);

	err = wwan_port_op_start(port);
	if (err)
		put_device(&port->dev);

	return err;
}

static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
	struct wwan_port *port = filp->private_data;

	wwan_port_op_stop(port);
	put_device(&port->dev);

	return 0;
}

static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	size_t copied;
	int ret;

	ret = wwan_wait_rx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = skb_dequeue(&port->rxq);
	if (!skb)
		return -EIO;

	copied = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, copied)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	skb_pull(skb, copied);

	/* skb is not fully consumed, keep it in the queue */
	if (skb->len)
		skb_queue_head(&port->rxq, skb);
	else
		consume_skb(skb);

	return copied;
}

static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
				    size_t count, loff_t *offp)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	int ret;

	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = alloc_skb(count, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	ret = wwan_port_op_tx(port, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return count;
}

static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
	struct wwan_port *port = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &port->waitqueue, wait);

	if (!is_write_blocked(port))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (!is_read_blocked(port))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!port->ops)
		mask |= EPOLLHUP | EPOLLERR;

	return mask;
}

static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.llseek = noop_llseek,
};
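
/* Usage sketch from userspace (illustrative only): each port is exposed as a
 * character device with stream semantics; the node name follows the
 * "wwan<dev>p<index><TYPE>" pattern set in wwan_create_port(), e.g.
 * /dev/wwan0p1AT for the first AT port of device wwan0 (the exact name
 * depends on the ports actually registered).
 *
 *	int fd = open("/dev/wwan0p1AT", O_RDWR);
 *	char resp[256];
 *	ssize_t n;
 *
 *	if (fd < 0)
 *		return -1;
 *	write(fd, "AT\r", 3);			// forwarded via ops->tx()
 *	n = read(fd, resp, sizeof(resp));	// blocks until wwan_port_rx()
 *	close(fd);				// drops the start count
 */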

static int __init wwan_init(void)
{
	wwan_class = class_create(THIS_MODULE, "wwan");
	if (IS_ERR(wwan_class))
		return PTR_ERR(wwan_class);

	/* chrdev used for wwan ports */
	wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
	if (wwan_major < 0) {
		class_destroy(wwan_class);
		return wwan_major;
	}

	return 0;
}

static void __exit wwan_exit(void)
{
	unregister_chrdev(wwan_major, "wwan_port");
	class_destroy(wwan_class);
}

module_init(wwan_init);
module_exit(wwan_exit);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");