1 // SPDX-License-Identifier: GPL-2.0-only 2 /* Copyright (c) 2021, Linaro Ltd <loic.poulain@linaro.org> */ 3 4 #include <linux/err.h> 5 #include <linux/errno.h> 6 #include <linux/debugfs.h> 7 #include <linux/fs.h> 8 #include <linux/init.h> 9 #include <linux/idr.h> 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/poll.h> 13 #include <linux/skbuff.h> 14 #include <linux/slab.h> 15 #include <linux/types.h> 16 #include <linux/uaccess.h> 17 #include <linux/termios.h> 18 #include <linux/wwan.h> 19 #include <net/rtnetlink.h> 20 #include <uapi/linux/wwan.h> 21 22 /* Maximum number of minors in use */ 23 #define WWAN_MAX_MINORS (1 << MINORBITS) 24 25 static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */ 26 static DEFINE_IDA(minors); /* minors for WWAN port chardevs */ 27 static DEFINE_IDA(wwan_dev_ids); /* for unique WWAN device IDs */ 28 static struct class *wwan_class; 29 static int wwan_major; 30 static struct dentry *wwan_debugfs_dir; 31 32 #define to_wwan_dev(d) container_of(d, struct wwan_device, dev) 33 #define to_wwan_port(d) container_of(d, struct wwan_port, dev) 34 35 /* WWAN port flags */ 36 #define WWAN_PORT_TX_OFF 0 37 38 /** 39 * struct wwan_device - The structure that defines a WWAN device 40 * 41 * @id: WWAN device unique ID. 42 * @dev: Underlying device. 43 * @port_id: Current available port ID to pick. 
44 * @ops: wwan device ops 45 * @ops_ctxt: context to pass to ops 46 * @debugfs_dir: WWAN device debugfs dir 47 */ 48 struct wwan_device { 49 unsigned int id; 50 struct device dev; 51 atomic_t port_id; 52 const struct wwan_ops *ops; 53 void *ops_ctxt; 54 #ifdef CONFIG_WWAN_DEBUGFS 55 struct dentry *debugfs_dir; 56 #endif 57 }; 58 59 /** 60 * struct wwan_port - The structure that defines a WWAN port 61 * @type: Port type 62 * @start_count: Port start counter 63 * @flags: Store port state and capabilities 64 * @ops: Pointer to WWAN port operations 65 * @ops_lock: Protect port ops 66 * @dev: Underlying device 67 * @rxq: Buffer inbound queue 68 * @waitqueue: The waitqueue for port fops (read/write/poll) 69 * @data_lock: Port specific data access serialization 70 * @at_data: AT port specific data 71 */ 72 struct wwan_port { 73 enum wwan_port_type type; 74 unsigned int start_count; 75 unsigned long flags; 76 const struct wwan_port_ops *ops; 77 struct mutex ops_lock; /* Serialize ops + protect against removal */ 78 struct device dev; 79 struct sk_buff_head rxq; 80 wait_queue_head_t waitqueue; 81 struct mutex data_lock; /* Port specific data access serialization */ 82 union { 83 struct { 84 struct ktermios termios; 85 int mdmbits; 86 } at_data; 87 }; 88 }; 89 90 static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf) 91 { 92 struct wwan_device *wwan = to_wwan_dev(dev); 93 94 return sprintf(buf, "%d\n", wwan->id); 95 } 96 static DEVICE_ATTR_RO(index); 97 98 static struct attribute *wwan_dev_attrs[] = { 99 &dev_attr_index.attr, 100 NULL, 101 }; 102 ATTRIBUTE_GROUPS(wwan_dev); 103 104 static void wwan_dev_destroy(struct device *dev) 105 { 106 struct wwan_device *wwandev = to_wwan_dev(dev); 107 108 ida_free(&wwan_dev_ids, wwandev->id); 109 kfree(wwandev); 110 } 111 112 static const struct device_type wwan_dev_type = { 113 .name = "wwan_dev", 114 .release = wwan_dev_destroy, 115 .groups = wwan_dev_groups, 116 }; 117 118 static int 
wwan_dev_parent_match(struct device *dev, const void *parent)
{
	/* Match a wwan_dev-typed class device owned by @parent (or @parent
	 * itself, so a wwan_device can be looked up by its own struct device).
	 */
	return (dev->type == &wwan_dev_type &&
		(dev->parent == parent || dev == parent));
}

/* Look up the wwan_device child of @parent. On success the returned device
 * carries a reference (taken by class_find_device()) that the caller must
 * drop with put_device().
 */
static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, parent, wwan_dev_parent_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

static int wwan_dev_name_match(struct device *dev, const void *name)
{
	return dev->type == &wwan_dev_type &&
	       strcmp(dev_name(dev), name) == 0;
}

/* Look up a wwan_device by its device name ("wwanN"). Returned device is
 * referenced; caller must put_device() it.
 */
static struct wwan_device *wwan_dev_get_by_name(const char *name)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

#ifdef CONFIG_WWAN_DEBUGFS
/* Return the debugfs dir of @parent's wwan_device. The implicit device
 * reference taken here is released by wwan_put_debugfs_dir().
 */
struct dentry *wwan_get_debugfs_dir(struct device *parent)
{
	struct wwan_device *wwandev;

	wwandev = wwan_dev_get_by_parent(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);

static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
{
	struct wwan_device *wwandev;

	if (dev->type != &wwan_dev_type)
		return 0;

	wwandev = to_wwan_dev(dev);

	return wwandev->debugfs_dir == dir;
}

/* Reverse lookup: wwan_device owning the given debugfs dir (referenced) */
static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
{
	struct device *dev;

	dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
	if (!dev)
		return ERR_PTR(-ENODEV);

	return to_wwan_dev(dev);
}

/* Drop the reference obtained via wwan_get_debugfs_dir() */
void wwan_put_debugfs_dir(struct dentry *dir)
{
	struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);

	if (WARN_ON(IS_ERR(wwandev)))
		return;

	/* wwan_dev_get_by_debugfs() also got a reference, hence the
	 * double put: one for the lookup just above, one for the
	 * reference handed out by wwan_get_debugfs_dir().
	 */
	put_device(&wwandev->dev);
	put_device(&wwandev->dev);
}
EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif

/* This function allocates and registers a new WWAN device OR if a WWAN device
 * already exists for the given parent, it gets a reference and returns it.
 * This function is not exported (for now), it is called indirectly via
 * wwan_create_port().
 */
static struct wwan_device *wwan_create_dev(struct device *parent)
{
	struct wwan_device *wwandev;
	int err, id;

	/* The 'find-alloc-register' operation must be protected against
	 * concurrent execution, a WWAN device is possibly shared between
	 * multiple callers or concurrently unregistered from wwan_remove_dev().
	 */
	mutex_lock(&wwan_register_lock);

	/* If wwandev already exists, return it (reference from the lookup) */
	wwandev = wwan_dev_get_by_parent(parent);
	if (!IS_ERR(wwandev))
		goto done_unlock;

	id = ida_alloc(&wwan_dev_ids, GFP_KERNEL);
	if (id < 0) {
		wwandev = ERR_PTR(id);
		goto done_unlock;
	}

	wwandev = kzalloc(sizeof(*wwandev), GFP_KERNEL);
	if (!wwandev) {
		wwandev = ERR_PTR(-ENOMEM);
		ida_free(&wwan_dev_ids, id);
		goto done_unlock;
	}

	wwandev->dev.parent = parent;
	wwandev->dev.class = wwan_class;
	wwandev->dev.type = &wwan_dev_type;
	wwandev->id = id;
	dev_set_name(&wwandev->dev, "wwan%d", wwandev->id);

	err = device_register(&wwandev->dev);
	if (err) {
		/* device_register() failure still requires put_device(),
		 * which releases id/memory through wwan_dev_destroy().
		 */
		put_device(&wwandev->dev);
		wwandev = ERR_PTR(err);
		goto done_unlock;
	}

#ifdef CONFIG_WWAN_DEBUGFS
	wwandev->debugfs_dir =
		debugfs_create_dir(kobject_name(&wwandev->dev.kobj),
				   wwan_debugfs_dir);
#endif

done_unlock:
	mutex_unlock(&wwan_register_lock);

	return wwandev;
}

/* device_for_each_child() callback: count any remaining wwan-class child
 * (ports); a non-zero return stops the iteration early.
 */
static int is_wwan_child(struct device *dev, void *data)
{
	return dev->class == wwan_class;
}

/* Drop a reference to the WWAN device and unregister it once the last port
 * is gone and no ops are registered.
 */
static void wwan_remove_dev(struct wwan_device *wwandev)
{
	int ret;

	/* Prevent
concurrent picking from wwan_create_dev */ 271 mutex_lock(&wwan_register_lock); 272 273 /* WWAN device is created and registered (get+add) along with its first 274 * child port, and subsequent port registrations only grab a reference 275 * (get). The WWAN device must then be unregistered (del+put) along with 276 * its last port, and reference simply dropped (put) otherwise. In the 277 * same fashion, we must not unregister it when the ops are still there. 278 */ 279 if (wwandev->ops) 280 ret = 1; 281 else 282 ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child); 283 284 if (!ret) { 285 #ifdef CONFIG_WWAN_DEBUGFS 286 debugfs_remove_recursive(wwandev->debugfs_dir); 287 #endif 288 device_unregister(&wwandev->dev); 289 } else { 290 put_device(&wwandev->dev); 291 } 292 293 mutex_unlock(&wwan_register_lock); 294 } 295 296 /* ------- WWAN port management ------- */ 297 298 static const struct { 299 const char * const name; /* Port type name */ 300 const char * const devsuf; /* Port devce name suffix */ 301 } wwan_port_types[WWAN_PORT_MAX + 1] = { 302 [WWAN_PORT_AT] = { 303 .name = "AT", 304 .devsuf = "at", 305 }, 306 [WWAN_PORT_MBIM] = { 307 .name = "MBIM", 308 .devsuf = "mbim", 309 }, 310 [WWAN_PORT_QMI] = { 311 .name = "QMI", 312 .devsuf = "qmi", 313 }, 314 [WWAN_PORT_QCDM] = { 315 .name = "QCDM", 316 .devsuf = "qcdm", 317 }, 318 [WWAN_PORT_FIREHOSE] = { 319 .name = "FIREHOSE", 320 .devsuf = "firehose", 321 }, 322 }; 323 324 static ssize_t type_show(struct device *dev, struct device_attribute *attr, 325 char *buf) 326 { 327 struct wwan_port *port = to_wwan_port(dev); 328 329 return sprintf(buf, "%s\n", wwan_port_types[port->type].name); 330 } 331 static DEVICE_ATTR_RO(type); 332 333 static struct attribute *wwan_port_attrs[] = { 334 &dev_attr_type.attr, 335 NULL, 336 }; 337 ATTRIBUTE_GROUPS(wwan_port); 338 339 static void wwan_port_destroy(struct device *dev) 340 { 341 struct wwan_port *port = to_wwan_port(dev); 342 343 ida_free(&minors, 
MINOR(port->dev.devt)); 344 mutex_destroy(&port->data_lock); 345 mutex_destroy(&port->ops_lock); 346 kfree(port); 347 } 348 349 static const struct device_type wwan_port_dev_type = { 350 .name = "wwan_port", 351 .release = wwan_port_destroy, 352 .groups = wwan_port_groups, 353 }; 354 355 static int wwan_port_minor_match(struct device *dev, const void *minor) 356 { 357 return (dev->type == &wwan_port_dev_type && 358 MINOR(dev->devt) == *(unsigned int *)minor); 359 } 360 361 static struct wwan_port *wwan_port_get_by_minor(unsigned int minor) 362 { 363 struct device *dev; 364 365 dev = class_find_device(wwan_class, NULL, &minor, wwan_port_minor_match); 366 if (!dev) 367 return ERR_PTR(-ENODEV); 368 369 return to_wwan_port(dev); 370 } 371 372 /* Allocate and set unique name based on passed format 373 * 374 * Name allocation approach is highly inspired by the __dev_alloc_name() 375 * function. 376 * 377 * To avoid names collision, the caller must prevent the new port device 378 * registration as well as concurrent invocation of this function. 
 */
static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
	const unsigned int max_ports = PAGE_SIZE * 8;
	struct class_dev_iter iter;
	unsigned long *idmap;
	struct device *dev;
	char buf[0x20];
	int id;

	/* One page used as a bitmap of in-use port numbers (PAGE_SIZE * 8 bits) */
	idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!idmap)
		return -ENOMEM;

	/* Collect ids of same name format ports */
	class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
	while ((dev = class_dev_iter_next(&iter))) {
		if (dev->parent != &wwandev->dev)
			continue;
		if (sscanf(dev_name(dev), fmt, &id) != 1)
			continue;
		if (id < 0 || id >= max_ports)
			continue;
		set_bit(id, idmap);
	}
	class_dev_iter_exit(&iter);

	/* Allocate unique id */
	id = find_first_zero_bit(idmap, max_ports);
	free_page((unsigned long)idmap);

	snprintf(buf, sizeof(buf), fmt, id);	/* Name generation */

	/* Double-check no sibling already carries the generated name */
	dev = device_find_child_by_name(&wwandev->dev, buf);
	if (dev) {
		put_device(dev);
		return -ENFILE;
	}

	return dev_set_name(&port->dev, buf);
}

/* Create and register a new WWAN port chardev under the WWAN device that
 * owns @parent (the WWAN device is created on first port). Returns the new
 * port or an ERR_PTR.
 */
struct wwan_port *wwan_create_port(struct device *parent,
				   enum wwan_port_type type,
				   const struct wwan_port_ops *ops,
				   void *drvdata)
{
	struct wwan_device *wwandev;
	struct wwan_port *port;
	char namefmt[0x20];
	int minor, err;

	if (type > WWAN_PORT_MAX || !ops)
		return ERR_PTR(-EINVAL);

	/* A port is always a child of a WWAN device, retrieve (allocate or
	 * pick) the WWAN device based on the provided parent device.
	 */
	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* A port is exposed as character device, get a minor */
	minor = ida_alloc_range(&minors, 0, WWAN_MAX_MINORS - 1, GFP_KERNEL);
	if (minor < 0) {
		err = minor;
		goto error_wwandev_remove;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		ida_free(&minors, minor);
		goto error_wwandev_remove;
	}

	port->type = type;
	port->ops = ops;
	mutex_init(&port->ops_lock);
	skb_queue_head_init(&port->rxq);
	init_waitqueue_head(&port->waitqueue);
	mutex_init(&port->data_lock);

	port->dev.parent = &wwandev->dev;
	port->dev.class = wwan_class;
	port->dev.type = &wwan_port_dev_type;
	port->dev.devt = MKDEV(wwan_major, minor);
	dev_set_drvdata(&port->dev, drvdata);

	/* allocate unique name based on wwan device id, port type and number */
	snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
		 wwan_port_types[port->type].devsuf);

	/* Serialize ports registration */
	mutex_lock(&wwan_register_lock);

	/* NOTE(review): the return value of __wwan_port_dev_assign_name() is
	 * not checked here; presumably on failure the device has no name and
	 * device_register() below fails, taking the error path — confirm.
	 */
	__wwan_port_dev_assign_name(port, namefmt);
	err = device_register(&port->dev);

	mutex_unlock(&wwan_register_lock);

	if (err)
		goto error_put_device;

	return port;

error_put_device:
	put_device(&port->dev);
error_wwandev_remove:
	wwan_remove_dev(wwandev);

	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(wwan_create_port);

/* Tear down a port: stop it if started, detach its ops so pending/future
 * fops fail, then unregister the chardev and release the WWAN device.
 */
void wwan_remove_port(struct wwan_port *port)
{
	struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);

	mutex_lock(&port->ops_lock);
	if (port->start_count)
		port->ops->stop(port);
	port->ops = NULL; /* Prevent any new port operations (e.g.
			   from fops) */
	mutex_unlock(&port->ops_lock);

	/* Wake readers/writers sleeping on the waitqueue so they observe
	 * the cleared ops and bail out.
	 */
	wake_up_interruptible(&port->waitqueue);

	skb_queue_purge(&port->rxq);
	dev_set_drvdata(&port->dev, NULL);
	device_unregister(&port->dev);

	/* Release related wwan device */
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_remove_port);

/* Driver-facing RX entry point: queue an inbound skb and wake readers */
void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb)
{
	skb_queue_tail(&port->rxq, skb);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_rx);

/* Driver-facing flow control: re-enable TX and wake blocked writers */
void wwan_port_txon(struct wwan_port *port)
{
	clear_bit(WWAN_PORT_TX_OFF, &port->flags);
	wake_up_interruptible(&port->waitqueue);
}
EXPORT_SYMBOL_GPL(wwan_port_txon);

/* Driver-facing flow control: pause TX (writers will block or get -EAGAIN) */
void wwan_port_txoff(struct wwan_port *port)
{
	set_bit(WWAN_PORT_TX_OFF, &port->flags);
}
EXPORT_SYMBOL_GPL(wwan_port_txoff);

void *wwan_port_get_drvdata(struct wwan_port *port)
{
	return dev_get_drvdata(&port->dev);
}
EXPORT_SYMBOL_GPL(wwan_port_get_drvdata);

/* First open starts the port via ops->start(); later opens only bump the
 * counter. Returns -ENODEV once the port has been unplugged.
 */
static int wwan_port_op_start(struct wwan_port *port)
{
	int ret = 0;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	/* If port is already started, don't start again */
	if (!port->start_count)
		ret = port->ops->start(port);

	if (!ret)
		port->start_count++;

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

/* Balance wwan_port_op_start(): last close stops the port and drops any
 * queued RX data.
 */
static void wwan_port_op_stop(struct wwan_port *port)
{
	mutex_lock(&port->ops_lock);
	port->start_count--;
	if (!port->start_count) {
		/* ops may already be NULL if the port was removed */
		if (port->ops)
			port->ops->stop(port);
		skb_queue_purge(&port->rxq);
	}
	mutex_unlock(&port->ops_lock);
}

/* Hand an skb to the driver, preferring the blocking variant when the
 * caller may sleep and the driver provides one.
 */
static int wwan_port_op_tx(struct wwan_port *port, struct sk_buff *skb,
			   bool nonblock)
{
	int ret;

	mutex_lock(&port->ops_lock);
	if (!port->ops) { /* Port got unplugged */
		ret = -ENODEV;
		goto out_unlock;
	}

	if (nonblock || !port->ops->tx_blocking)
		ret = port->ops->tx(port, skb);
	else
		ret = port->ops->tx_blocking(port, skb);

out_unlock:
	mutex_unlock(&port->ops_lock);

	return ret;
}

/* Reading blocks while the RX queue is empty — unless the port was
 * unplugged (ops == NULL), in which case readers must not sleep forever.
 */
static bool is_read_blocked(struct wwan_port *port)
{
	return skb_queue_empty(&port->rxq) && port->ops;
}

/* Writing blocks while TX is flow-controlled off — same unplug escape */
static bool is_write_blocked(struct wwan_port *port)
{
	return test_bit(WWAN_PORT_TX_OFF, &port->flags) && port->ops;
}

/* Wait until RX data is available (or return -EAGAIN/-ERESTARTSYS) */
static int wwan_wait_rx(struct wwan_port *port, bool nonblock)
{
	if (!is_read_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_read_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

/* Wait until TX is allowed (or return -EAGAIN/-ERESTARTSYS) */
static int wwan_wait_tx(struct wwan_port *port, bool nonblock)
{
	if (!is_write_blocked(port))
		return 0;

	if (nonblock)
		return -EAGAIN;

	if (wait_event_interruptible(port->waitqueue, !is_write_blocked(port)))
		return -ERESTARTSYS;

	return 0;
}

static int wwan_port_fops_open(struct inode *inode, struct file *file)
{
	struct wwan_port *port;
	int err = 0;

	port = wwan_port_get_by_minor(iminor(inode));
	if (IS_ERR(port))
		return PTR_ERR(port);

	file->private_data = port;
	stream_open(inode, file);

	err = wwan_port_op_start(port);
	if (err)
		/* drop the reference taken by wwan_port_get_by_minor() */
		put_device(&port->dev);

	return err;
}

static int wwan_port_fops_release(struct inode *inode, struct file *filp)
{
	struct wwan_port *port = filp->private_data;

	wwan_port_op_stop(port);
	put_device(&port->dev);

	return 0;
}

/* Read one message (or a prefix of it) from the RX queue; a partially
 * consumed skb is pushed back so the remainder is returned next time.
 */
static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	size_t copied;
	int ret;

	ret = wwan_wait_rx(port, !!(filp->f_flags
				    & O_NONBLOCK));
	if (ret)
		return ret;

	skb = skb_dequeue(&port->rxq);
	if (!skb)
		/* queue drained between wakeup and dequeue (port unplugged
		 * or concurrent reader)
		 */
		return -EIO;

	copied = min_t(size_t, count, skb->len);
	if (copy_to_user(buf, skb->data, copied)) {
		kfree_skb(skb);
		return -EFAULT;
	}
	skb_pull(skb, copied);

	/* skb is not fully consumed, keep it in the queue */
	if (skb->len)
		skb_queue_head(&port->rxq, skb);
	else
		consume_skb(skb);

	return copied;
}

/* Copy the whole user buffer into one skb and hand it to the driver */
static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
				    size_t count, loff_t *offp)
{
	struct wwan_port *port = filp->private_data;
	struct sk_buff *skb;
	int ret;

	ret = wwan_wait_tx(port, !!(filp->f_flags & O_NONBLOCK));
	if (ret)
		return ret;

	skb = alloc_skb(count, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (copy_from_user(skb_put(skb, count), buf, count)) {
		kfree_skb(skb);
		return -EFAULT;
	}

	ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return count;
}

static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
{
	struct wwan_port *port = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &port->waitqueue, wait);

	mutex_lock(&port->ops_lock);
	/* let the driver refine writability when it provides tx_poll */
	if (port->ops && port->ops->tx_poll)
		mask |= port->ops->tx_poll(port, filp, wait);
	else if (!is_write_blocked(port))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (!is_read_blocked(port))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (!port->ops)
		/* port was removed: report hangup/error to wake pollers */
		mask |= EPOLLHUP | EPOLLERR;
	mutex_unlock(&port->ops_lock);

	return mask;
}

/* Implements minimalistic stub terminal IOCTLs support */
static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
				    unsigned long arg)
{
	int ret = 0;

	mutex_lock(&port->data_lock);

	switch (cmd) {
	case TCFLSH:
		break;

	/* NOTE(review): only the leading sizeof(struct termios) /
	 * sizeof(struct termios2) bytes of the kernel ktermios are copied
	 * to/from user space — presumably matching the user-space termios
	 * layouts; confirm against the tty termios ioctl convention.
	 */
	case TCGETS:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios)))
			ret = -EFAULT;
		break;

	case TCSETS:
	case TCSETSW:
	case TCSETSF:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios)))
			ret = -EFAULT;
		break;

#ifdef TCGETS2
	case TCGETS2:
		if (copy_to_user((void __user *)arg, &port->at_data.termios,
				 sizeof(struct termios2)))
			ret = -EFAULT;
		break;

	case TCSETS2:
	case TCSETSW2:
	case TCSETSF2:
		if (copy_from_user(&port->at_data.termios, (void __user *)arg,
				   sizeof(struct termios2)))
			ret = -EFAULT;
		break;
#endif

	case TIOCMGET:
		ret = put_user(port->at_data.mdmbits, (int __user *)arg);
		break;

	case TIOCMSET:
	case TIOCMBIC:
	case TIOCMBIS: {
		int mdmbits;

		if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
			ret = -EFAULT;
			break;
		}
		/* stub modem-control lines: stored but not acted upon */
		if (cmd == TIOCMBIC)
			port->at_data.mdmbits &= ~mdmbits;
		else if (cmd == TIOCMBIS)
			port->at_data.mdmbits |= mdmbits;
		else
			port->at_data.mdmbits = mdmbits;
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&port->data_lock);

	return ret;
}

static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct wwan_port *port = filp->private_data;
	int res;

	if (port->type == WWAN_PORT_AT) {	/* AT port specific IOCTLs */
		res = wwan_port_fops_at_ioctl(port, cmd, arg);
		if (res != -ENOIOCTLCMD)
			return res;
	}

	switch (cmd) {
	case TIOCINQ: {	/* aka SIOCINQ aka FIONREAD */
		unsigned long flags;
		struct sk_buff *skb;
		int amount = 0;

		/* walk the RX queue under its lock to sum pending bytes */
		spin_lock_irqsave(&port->rxq.lock, flags);
		skb_queue_walk(&port->rxq, skb)
			amount += skb->len;
		spin_unlock_irqrestore(&port->rxq.lock, flags);

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}
}

static const struct file_operations wwan_port_fops = {
	.owner = THIS_MODULE,
	.open = wwan_port_fops_open,
	.release = wwan_port_fops_release,
	.read = wwan_port_fops_read,
	.write = wwan_port_fops_write,
	.poll = wwan_port_fops_poll,
	.unlocked_ioctl = wwan_port_fops_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ptr_ioctl,
#endif
	.llseek = noop_llseek,
};

/* rtnl_link_ops.validate: a WWAN link needs a parent device name and a
 * link id attribute.
 */
static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!tb[IFLA_PARENT_DEV_NAME])
		return -EINVAL;

	if (!data[IFLA_WWAN_LINK_ID])
		return -EINVAL;

	return 0;
}

static struct device_type wwan_type = { .name = "wwan" };

/* rtnl_link_ops.alloc: allocate the netdev via the registered wwan_ops
 * setup callback, reserving room for wwan_netdev_priv + driver priv.
 */
static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
					  const char *ifname,
					  unsigned char name_assign_type,
					  unsigned int num_tx_queues,
					  unsigned int num_rx_queues)
{
	const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
	struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
	struct net_device *dev;
	unsigned int priv_size;

	if (IS_ERR(wwandev))
		return ERR_CAST(wwandev);

	/* only supported if ops were registered (not just ports) */
	if (!wwandev->ops) {
		dev = ERR_PTR(-EOPNOTSUPP);
		goto out;
	}

	priv_size = sizeof(struct wwan_netdev_priv) + wwandev->ops->priv_size;
	dev = alloc_netdev_mqs(priv_size, ifname, name_assign_type,
			       wwandev->ops->setup, num_tx_queues, num_rx_queues);

	if (dev) {
		SET_NETDEV_DEV(dev, &wwandev->dev);
		SET_NETDEV_DEVTYPE(dev, &wwan_type);
	}

out:
	/* release the reference */
	put_device(&wwandev->dev);
	return dev;
}

/* rtnl_link_ops.newlink: register the netdev, deferring to the driver's
 * newlink callback when provided.
 */
static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
			     struct nlattr *tb[], struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
	u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
	struct wwan_netdev_priv *priv = netdev_priv(dev);
	int ret;

	if (IS_ERR(wwandev))
		return PTR_ERR(wwandev);

	/* shouldn't have a netdev (left) with us as parent so WARN */
	if (WARN_ON(!wwandev->ops)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	priv->link_id = link_id;
	if (wwandev->ops->newlink)
		ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
					    link_id, extack);
	else
		ret = register_netdevice(dev);

out:
	/* release the reference */
	put_device(&wwandev->dev);
	return ret;
}

/* rtnl_link_ops.dellink: unregister the netdev (batched on @head),
 * deferring to the driver's dellink callback when provided.
 */
static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);

	if (IS_ERR(wwandev))
		return;

	/* shouldn't have a netdev (left) with us as parent so WARN */
	if (WARN_ON(!wwandev->ops))
		goto out;

	if (wwandev->ops->dellink)
		wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
	else
		unregister_netdevice_queue(dev, head);

out:
	/* release the reference */
	put_device(&wwandev->dev);
}

static size_t wwan_rtnl_get_size(const struct net_device *dev)
{
	return
		nla_total_size(4) +	/* IFLA_WWAN_LINK_ID */
		0;
}

static int wwan_rtnl_fill_info(struct sk_buff *skb,
			       const struct net_device *dev)
{
	struct wwan_netdev_priv *priv = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_WWAN_LINK_ID, priv->link_id))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
	[IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
};

static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
	.kind = "wwan",
	.maxtype = __IFLA_WWAN_MAX,
	.alloc = wwan_rtnl_alloc,
	.validate = wwan_rtnl_validate,
	.newlink = wwan_rtnl_newlink,
	.dellink = wwan_rtnl_dellink,
	.get_size = wwan_rtnl_get_size,
	.fill_info = wwan_rtnl_fill_info,
	.policy = wwan_rtnl_policy,
};

/* Best-effort creation of a default link for a freshly registered WWAN
 * device, going through the same rtnetlink path user space would use.
 */
static void wwan_create_default_link(struct wwan_device *wwandev,
				     u32 def_link_id)
{
	struct nlattr *tb[IFLA_MAX + 1], *linkinfo[IFLA_INFO_MAX + 1];
	struct nlattr *data[IFLA_WWAN_MAX + 1];
	struct net_device *dev;
	struct nlmsghdr *nlh;
	struct sk_buff *msg;

	/* Forge attributes required to create a WWAN netdev. We first
	 * build a netlink message and then parse it. This looks
	 * odd, but such approach is less error prone.
	 */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (WARN_ON(!msg))
		return;
	nlh = nlmsg_put(msg, 0, 0, RTM_NEWLINK, 0, 0);
	if (WARN_ON(!nlh))
		goto free_attrs;

	if (nla_put_string(msg, IFLA_PARENT_DEV_NAME, dev_name(&wwandev->dev)))
		goto free_attrs;
	tb[IFLA_LINKINFO] = nla_nest_start(msg, IFLA_LINKINFO);
	if (!tb[IFLA_LINKINFO])
		goto free_attrs;
	linkinfo[IFLA_INFO_DATA] = nla_nest_start(msg, IFLA_INFO_DATA);
	if (!linkinfo[IFLA_INFO_DATA])
		goto free_attrs;
	if (nla_put_u32(msg, IFLA_WWAN_LINK_ID, def_link_id))
		goto free_attrs;
	nla_nest_end(msg, linkinfo[IFLA_INFO_DATA]);
	nla_nest_end(msg, tb[IFLA_LINKINFO]);

	nlmsg_end(msg, nlh);

	/* The next three parsing calls can not fail */
	nlmsg_parse_deprecated(nlh, 0, tb, IFLA_MAX, NULL, NULL);
	nla_parse_nested_deprecated(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO],
				    NULL, NULL);
	nla_parse_nested_deprecated(data, IFLA_WWAN_MAX,
				    linkinfo[IFLA_INFO_DATA], NULL, NULL);

	rtnl_lock();

	dev = rtnl_create_link(&init_net, "wwan%d", NET_NAME_ENUM,
			       &wwan_rtnl_link_ops, tb, NULL);
	if (WARN_ON(IS_ERR(dev)))
		goto unlock;

	if (WARN_ON(wwan_rtnl_newlink(&init_net, dev,
				      tb, data, NULL))) {
		free_netdev(dev);
		goto unlock;
	}

	rtnl_configure_link(dev, NULL);	/* Link initialized, notify new link */

unlock:
	rtnl_unlock();

free_attrs:
	nlmsg_free(msg);
}

/**
 * wwan_register_ops - register WWAN device ops
 * @parent: Device to use as parent and shared by all WWAN ports and
 *	created netdevs
 * @ops: operations to register
 * @ctxt: context to pass to operations
 * @def_link_id: id of the default link that will be automatically created by
 *	the WWAN core for the WWAN device. The default link will not be created
 *	if the passed value is WWAN_NO_DEFAULT_LINK.
 *
 * Returns: 0 on success, a negative error code on failure
 */
int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
		      void *ctxt, u32 def_link_id)
{
	struct wwan_device *wwandev;

	if (WARN_ON(!parent || !ops || !ops->setup))
		return -EINVAL;

	wwandev = wwan_create_dev(parent);
	if (IS_ERR(wwandev))
		return PTR_ERR(wwandev);

	if (WARN_ON(wwandev->ops)) {
		wwan_remove_dev(wwandev);
		return -EBUSY;
	}

	wwandev->ops = ops;
	wwandev->ops_ctxt = ctxt;

	/* NB: we do not abort ops registration in case of default link
	 * creation failure. Link ops is the management interface, while the
	 * default link creation is a service option. And we should not prevent
	 * a user from manually creating a link later if service option failed
	 * now.
	 */
	if (def_link_id != WWAN_NO_DEFAULT_LINK)
		wwan_create_default_link(wwandev, def_link_id);

	return 0;
}
EXPORT_SYMBOL_GPL(wwan_register_ops);

/* Enqueue child netdev deletion */
static int wwan_child_dellink(struct device *dev, void *data)
{
	struct list_head *kill_list = data;

	if (dev->type == &wwan_type)
		wwan_rtnl_dellink(to_net_dev(dev), kill_list);

	return 0;
}

/**
 * wwan_unregister_ops - remove WWAN device ops
 * @parent: Device to use as parent and shared by all WWAN ports and
 *	created netdevs
 */
void wwan_unregister_ops(struct device *parent)
{
	struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
	LIST_HEAD(kill_list);

	if (WARN_ON(IS_ERR(wwandev)))
		return;
	if (WARN_ON(!wwandev->ops)) {
		put_device(&wwandev->dev);
		return;
	}

	/* put the reference obtained by wwan_dev_get_by_parent(),
	 * we should still have one (that the owner is giving back
	 * now) due to the ops being assigned.
	 */
	put_device(&wwandev->dev);

	rtnl_lock();	/* Prevent concurrent netdev(s) creation/destroying */

	/* Remove all child netdev(s), using batch removing */
	device_for_each_child(&wwandev->dev, &kill_list,
			      wwan_child_dellink);
	unregister_netdevice_many(&kill_list);

	wwandev->ops = NULL;	/* Finally remove ops */

	rtnl_unlock();

	wwandev->ops_ctxt = NULL;
	wwan_remove_dev(wwandev);
}
EXPORT_SYMBOL_GPL(wwan_unregister_ops);

static int __init wwan_init(void)
{
	int err;

	err = rtnl_link_register(&wwan_rtnl_link_ops);
	if (err)
		return err;

	wwan_class = class_create(THIS_MODULE, "wwan");
	if (IS_ERR(wwan_class)) {
		err = PTR_ERR(wwan_class);
		goto unregister;
	}

	/* chrdev used for wwan ports */
	wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
				       &wwan_port_fops);
	if (wwan_major < 0) {
		err = wwan_major;
		goto destroy;
	}

#ifdef CONFIG_WWAN_DEBUGFS
	wwan_debugfs_dir = debugfs_create_dir("wwan", NULL);
#endif

	return 0;

destroy:
	class_destroy(wwan_class);
unregister:
	rtnl_link_unregister(&wwan_rtnl_link_ops);
	return err;
}

static void __exit wwan_exit(void)
{
	/* reverse order of wwan_init() */
	debugfs_remove_recursive(wwan_debugfs_dir);
	__unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
	rtnl_link_unregister(&wwan_rtnl_link_ops);
	class_destroy(wwan_class);
}

module_init(wwan_init);
module_exit(wwan_exit);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("WWAN core");
MODULE_LICENSE("GPL v2");