/*
 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
 * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
 * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "../tty/hvc/hvc_console.h"

/*
 * This is a global struct for storing common data for all the devices
 * this driver handles.
 *
 * Mainly, it has a linked list for all the consoles in one place so
 * that callbacks from hvc for get_chars(), put_chars() work properly
 * across multiple devices and multiple ports per device.
 */
struct ports_driver_data {
	/* Used for registering chardevs */
	struct class *class;

	/* Used for exporting per-port information to debugfs */
	struct dentry *debugfs_dir;

	/* List of all the devices we're handling */
	struct list_head portdevs;

	/* Number of devices this driver is handling */
	unsigned int index;

	/*
	 * This is used to keep track of the number of hvc consoles
	 * spawned by this driver.  This number is given as the first
	 * argument to hvc_alloc().  To correctly map an initial
	 * console spawned via hvc_instantiate to the console being
	 * hooked up via hvc_alloc, we need to pass the same vtermno.
	 *
	 * We also just assume the first console being initialised was
	 * the first one that got used as the initial console.
	 */
	unsigned int next_vtermno;

	/* All the console devices handled by this driver */
	struct list_head consoles;
};
static struct ports_driver_data pdrvdata;

DEFINE_SPINLOCK(pdrvdata_lock);

/* This struct holds information that's relevant only for console ports */
struct console {
	/* We'll place all consoles in a list in the pdrvdata struct */
	struct list_head list;

	/* The hvc device associated with this console port */
	struct hvc_struct *hvc;

	/* The size of the console */
	struct winsize ws;

	/*
	 * This is the vtermno that we used to register with hvc in
	 * hvc_instantiate() and hvc_alloc(); it is the number passed
	 * back to us by the hvc callbacks to differentiate between
	 * the other console ports handled by this driver.
	 */
	u32 vtermno;
};

struct port_buffer {
	char *buf;

	/* size of the buffer in *buf above */
	size_t size;

	/* used length of the buffer */
	size_t len;
	/* offset in the buf from which to consume data */
	size_t offset;
};

/*
 * This is a per-device struct that stores data common to all the
 * ports for that device (vdev->priv).
 */
struct ports_device {
	/* Next portdev in the list, head is in the pdrvdata struct */
	struct list_head list;

	/*
	 * Workqueue handlers where we process deferred work after
	 * notification
	 */
	struct work_struct control_work;

	struct list_head ports;

	/* To protect the list of ports */
	spinlock_t ports_lock;

	/* To protect the vq operations for the control channel */
	spinlock_t cvq_lock;

	/* The current config space is stored here */
	struct virtio_console_config config;

	/* The virtio device we're associated with */
	struct virtio_device *vdev;

	/*
	 * A couple of virtqueues for the control channel: one for
	 * guest->host transfers, one for host->guest transfers
	 */
	struct virtqueue *c_ivq, *c_ovq;

	/* Array of per-port IO virtqueues */
	struct virtqueue **in_vqs, **out_vqs;

	/* Used for numbering devices for sysfs and debugfs */
	unsigned int drv_index;

	/* Major number for this device.  Ports will be created as minors. */
	int chr_major;
};

/* This struct holds the per-port data */
struct port {
	/* Next port in the list, head is in the ports_device */
	struct list_head list;

	/* Pointer to the parent virtio_console device */
	struct ports_device *portdev;

	/* The current buffer from which data has to be fed to readers */
	struct port_buffer *inbuf;

	/*
	 * To protect the operations on the in_vq associated with this
	 * port.  Has to be a spinlock because it can be called from
	 * interrupt context (get_char()).
	 */
	spinlock_t inbuf_lock;

	/* Protect the operations on the out_vq. */
	spinlock_t outvq_lock;

	/* The IO vqs for this port */
	struct virtqueue *in_vq, *out_vq;

	/* File in the debugfs directory that exposes this port's information */
	struct dentry *debugfs_file;

	/*
	 * The entries in this struct will be valid if this port is
	 * hooked up to an hvc console
	 */
	struct console cons;

	/* Each port is associated with a separate char device */
	struct cdev *cdev;
	struct device *dev;

	/* Reference-counting to handle port hot-unplugs and file operations */
	struct kref kref;

	/* A waitqueue for poll() or blocking read operations */
	wait_queue_head_t waitqueue;

	/* The 'name' of the port that we expose via sysfs properties */
	char *name;

	/* We can notify apps of host connect / disconnect events via SIGIO */
	struct fasync_struct *async_queue;

	/* The 'id' to identify the port with the Host */
	u32 id;

	bool outvq_full;

	/* Is the host device open */
	bool host_connected;

	/* We should allow only one process to open a port */
	bool guest_connected;
};

/* This is the very early arch-specified put chars function. */
static int (*early_put_chars)(u32, const char *, int);

static struct port *find_port_by_vtermno(u32 vtermno)
{
	struct port *port;
	struct console *cons;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(cons, &pdrvdata.consoles, list) {
		if (cons->vtermno == vtermno) {
			port = container_of(cons, struct port, cons);
			goto out;
		}
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}

static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
						 dev_t dev)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->cdev->dev == dev)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}

static struct port *find_port_by_devt(dev_t dev)
{
	struct ports_device *portdev;
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
		port = find_port_by_devt_in_portdev(portdev, dev);
		if (port)
			goto out;
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}

static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->id == id)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}

static struct port *find_port_by_vq(struct ports_device *portdev,
				    struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->in_vq == vq || port->out_vq == vq)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);
	return port;
}

static bool is_console_port(struct port *port)
{
	if (port->cons.hvc)
		return true;
	return false;
}

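/*
 * Check whether the host negotiated the VIRTIO_CONSOLE_F_MULTIPORT
 * feature with us.  Without it, the device exposes a single console
 * port and no control channel.
 */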
static inline bool use_multiport(struct ports_device *portdev)
{
	/*
	 * This condition can be true when put_chars is called from
	 * early_init
	 */
	if (!portdev->vdev)
		return false;
	return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
}

static void free_buf(struct port_buffer *buf)
{
	kfree(buf->buf);
	kfree(buf);
}

static struct port_buffer *alloc_buf(size_t buf_size)
{
	struct port_buffer *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		goto fail;
	buf->buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf->buf)
		goto free_buf;
	buf->len = 0;
	buf->offset = 0;
	buf->size = buf_size;
	return buf;

free_buf:
	kfree(buf);
fail:
	return NULL;
}

/* Callers should take appropriate locks */
static void *get_inbuf(struct port *port)
{
	struct port_buffer *buf;
	struct virtqueue *vq;
	unsigned int len;

	vq = port->in_vq;
	buf = virtqueue_get_buf(vq, &len);
	if (buf) {
		buf->len = len;
		buf->offset = 0;
	}
	return buf;
}

/*
 * Create a scatter-gather list representing our input buffer and put
 * it in the queue.
 *
 * Callers should take appropriate locks.
 */
static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
	struct scatterlist sg[1];
	int ret;

	sg_init_one(sg, buf->buf, buf->size);

	ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
	virtqueue_kick(vq);
	return ret;
}

/*
 * Discard any unread data this port has.  Callers should take
 * appropriate locks.
 */
static void discard_port_data(struct port *port)
{
	struct port_buffer *buf;
	struct virtqueue *vq;
	unsigned int len;
	int ret;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	vq = port->in_vq;
	if (port->inbuf)
		buf = port->inbuf;
	else
		buf = virtqueue_get_buf(vq, &len);

	ret = 0;
	while (buf) {
		if (add_inbuf(vq, buf) < 0) {
			ret++;
			free_buf(buf);
		}
		buf = virtqueue_get_buf(vq, &len);
	}
	port->inbuf = NULL;
	if (ret)
		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
			 ret);
}

static bool port_has_data(struct port *port)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&port->inbuf_lock, flags);
	if (port->inbuf) {
		ret = true;
		goto out;
	}
	port->inbuf = get_inbuf(port);
	if (port->inbuf) {
		ret = true;
		goto out;
	}
	ret = false;
out:
	spin_unlock_irqrestore(&port->inbuf_lock, flags);
	return ret;
}

static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
				  unsigned int event, unsigned int value)
{
	struct scatterlist sg[1];
	struct virtio_console_control cpkt;
	struct virtqueue *vq;
	unsigned int len;

	if (!use_multiport(portdev))
		return 0;

	cpkt.id = port_id;
	cpkt.event = event;
	cpkt.value = value;

	vq = portdev->c_ovq;

	sg_init_one(sg, &cpkt, sizeof(cpkt));
	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
		virtqueue_kick(vq);
		while (!virtqueue_get_buf(vq, &len))
			cpu_relax();
	}
	return 0;
}

static ssize_t send_control_msg(struct port *port, unsigned int event,
				unsigned int value)
{
	/* Did the port get unplugged before userspace closed it? */
	if (port->portdev)
		return __send_control_msg(port->portdev, port->id, event, value);
	return 0;
}

/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
	void *buf;
	unsigned int len;

	if (!port->portdev) {
		/* Device has been unplugged.  vqs are already gone. */
		return;
	}
	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
		kfree(buf);
		port->outvq_full = false;
	}
}

static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
			bool nonblock)
{
	struct scatterlist sg[1];
	struct virtqueue *out_vq;
	ssize_t ret;
	unsigned long flags;
	unsigned int len;

	out_vq = port->out_vq;

	spin_lock_irqsave(&port->outvq_lock, flags);

	reclaim_consumed_buffers(port);

	sg_init_one(sg, in_buf, in_count);
	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);

	/* Tell Host to go! */
	virtqueue_kick(out_vq);

	if (ret < 0) {
		in_count = 0;
		goto done;
	}

	if (ret == 0)
		port->outvq_full = true;

	if (nonblock)
		goto done;

	/*
	 * Wait till the host acknowledges it pushed out the data we
	 * sent.  This is done for data from the hvc_console; the tty
	 * operations are performed with spinlocks held so we can't
	 * sleep here.  An alternative would be to copy the data to a
	 * buffer and relax the spinning requirement.  The downside is
	 * we need to kmalloc a GFP_ATOMIC buffer each time the
	 * console driver writes something out.
	 */
	while (!virtqueue_get_buf(out_vq, &len))
		cpu_relax();
done:
	spin_unlock_irqrestore(&port->outvq_lock, flags);
	/*
	 * We're expected to return the amount of data we wrote -- all
	 * of it
	 */
	return in_count;
}

/*
 * Give out the data that's requested from the buffer that we have
 * queued up.
 */
static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
			    bool to_user)
{
	struct port_buffer *buf;
	unsigned long flags;

	if (!out_count || !port_has_data(port))
		return 0;

	buf = port->inbuf;
	out_count = min(out_count, buf->len - buf->offset);

	if (to_user) {
		ssize_t ret;

		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
		if (ret)
			return -EFAULT;
	} else {
		memcpy(out_buf, buf->buf + buf->offset, out_count);
	}

	buf->offset += out_count;

	if (buf->offset == buf->len) {
		/*
		 * We're done using all the data in this buffer.
		 * Re-queue so that the Host can send us more data.
		 */
		spin_lock_irqsave(&port->inbuf_lock, flags);
		port->inbuf = NULL;

		if (add_inbuf(port->in_vq, buf) < 0)
			dev_warn(port->dev, "failed add_buf\n");

		spin_unlock_irqrestore(&port->inbuf_lock, flags);
	}
	/* Return the number of bytes actually copied */
	return out_count;
}

/* The condition that must be true for polling to end */
static bool will_read_block(struct port *port)
{
	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	return !port_has_data(port) && port->host_connected;
}

static bool will_write_block(struct port *port)
{
	bool ret;

	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	if (!port->host_connected)
		return true;

	spin_lock_irq(&port->outvq_lock);
	/*
	 * Check if the Host has consumed any buffers since we last
	 * sent data (this is only applicable for nonblocking ports).
	 */
	reclaim_consumed_buffers(port);
	ret = port->outvq_full;
	spin_unlock_irq(&port->outvq_lock);

	return ret;
}

static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
			      size_t count, loff_t *offp)
{
	struct port *port;
	ssize_t ret;

	port = filp->private_data;

	if (!port_has_data(port)) {
		/*
		 * If nothing's connected on the host just return 0 in
		 * case of list_empty; this tells the userspace app
		 * that there's no connection
		 */
		if (!port->host_connected)
			return 0;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(port->waitqueue,
					       !will_read_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;
	/*
	 * We could've received a disconnection message while we were
	 * waiting for more data.
	 *
	 * This check is not clubbed in the if() statement above as we
	 * might receive some data as well as the host could get
	 * disconnected after we got woken up from our wait.  So we
	 * really want to give off whatever data we have and only then
	 * check for host_connected.
	 */
	if (!port_has_data(port) && !port->host_connected)
		return 0;

	return fill_readbuf(port, ubuf, count, true);
}

static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct port *port;
	char *buf;
	ssize_t ret;
	bool nonblock;

	/* Userspace could be out to fool us */
	if (!count)
		return 0;

	port = filp->private_data;

	nonblock = filp->f_flags & O_NONBLOCK;

	if (will_write_block(port)) {
		if (nonblock)
			return -EAGAIN;

		ret = wait_event_interruptible(port->waitqueue,
					       !will_write_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;

	count = min((size_t)(32 * 1024), count);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = copy_from_user(buf, ubuf, count);
	if (ret) {
		ret = -EFAULT;
		goto free_buf;
	}

	/*
	 * We now ask send_buf() to not spin for generic ports -- we
	 * can re-use the same code path that non-blocking file
	 * descriptors take for blocking file descriptors since the
	 * wait is already done and we're certain the write will go
	 * through to the host.
	 */
	nonblock = true;
	ret = send_buf(port, buf, count, nonblock);

	if (nonblock && ret > 0)
		goto out;

free_buf:
	kfree(buf);
out:
	return ret;
}

static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
{
	struct port *port;
	unsigned int ret;

	port = filp->private_data;
	poll_wait(filp, &port->waitqueue, wait);

	if (!port->guest_connected) {
		/* Port got unplugged */
		return POLLHUP;
	}
	ret = 0;
	if (!will_read_block(port))
		ret |= POLLIN | POLLRDNORM;
	if (!will_write_block(port))
		ret |= POLLOUT;
	if (!port->host_connected)
		ret |= POLLHUP;

	return ret;
}

static void remove_port(struct kref *kref);

static int port_fops_release(struct inode *inode, struct file *filp)
{
	struct port *port;

	port = filp->private_data;

	/* Notify host of port being closed */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);

	spin_lock_irq(&port->inbuf_lock);
	port->guest_connected = false;

	discard_port_data(port);

	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	/*
	 * Locks aren't necessary here as a port can't be opened after
	 * unplug, and if a port isn't unplugged, a kref would already
	 * exist for the port.  Plus, taking ports_lock here would
	 * create a dependency on other locks taken by functions
	 * inside remove_port if we're the last holder of the port,
	 * creating many problems.
	 */
	kref_put(&port->kref, remove_port);

	return 0;
}

static int port_fops_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct port *port;
	int ret;

	port = find_port_by_devt(cdev->dev);
	filp->private_data = port;

	/* Prevent against a port getting hot-unplugged at the same time */
	spin_lock_irq(&port->portdev->ports_lock);
	kref_get(&port->kref);
	spin_unlock_irq(&port->portdev->ports_lock);

	/*
	 * Don't allow opening of console port devices -- that's done
	 * via /dev/hvc
	 */
	if (is_console_port(port)) {
		ret = -ENXIO;
		goto out;
	}

	/* Allow only one process to open a particular port at a time */
	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		spin_unlock_irq(&port->inbuf_lock);
		ret = -EMFILE;
		goto out;
	}

	port->guest_connected = true;
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	/*
	 * There might be a chance that we missed reclaiming a few
	 * buffers in the window of the port getting previously closed
	 * and opening now.
	 */
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	nonseekable_open(inode, filp);

	/* Notify host of port being opened */
	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
out:
	kref_put(&port->kref, remove_port);
	return ret;
}

static int port_fops_fasync(int fd, struct file *filp, int mode)
{
	struct port *port;

	port = filp->private_data;
	return fasync_helper(fd, filp, mode, &port->async_queue);
}

/*
 * The file operations that we support: programs in the guest can open
 * a console device, read from it, write to it, poll for data and
 * close it.  The devices are at
 * /dev/vport<device number>p<port number>
 */
static const struct file_operations port_fops = {
	.owner = THIS_MODULE,
	.open = port_fops_open,
	.read = port_fops_read,
	.write = port_fops_write,
	.poll = port_fops_poll,
	.release = port_fops_release,
	.fasync = port_fops_fasync,
	.llseek = no_llseek,
};

/*
 * The put_chars() callback is pretty straightforward.
 *
 * We turn the characters into a scatter-gather list, add it to the
 * output queue and then kick the Host.  Then we sit here waiting for
 * it to finish: inefficient in theory, but in practice
 * implementations will do it immediately (lguest's Launcher does).
 */
static int put_chars(u32 vtermno, const char *buf, int count)
{
	struct port *port;

	if (unlikely(early_put_chars))
		return early_put_chars(vtermno, buf, count);

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	return send_buf(port, (void *)buf, count, false);
}

/*
 * get_chars() is the callback from the hvc_console infrastructure
 * when an interrupt is received.
 *
 * We call out to fill_readbuf that gets us the required data from the
 * buffers that are queued up.
 */
static int get_chars(u32 vtermno, char *buf, int count)
{
	struct port *port;

	/* If we've not set up the port yet, we have no input to give. */
	if (unlikely(early_put_chars))
		return 0;

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	/* If we don't have an input queue yet, we can't get input. */
	BUG_ON(!port->in_vq);

	return fill_readbuf(port, buf, count, false);
}

static void resize_console(struct port *port)
{
	struct virtio_device *vdev;

	/* The port could have been hot-unplugged */
	if (!port || !is_console_port(port))
		return;

	vdev = port->portdev->vdev;
	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
		hvc_resize(port->cons.hvc, port->cons.ws);
}

/* We set the configuration at this point, since we now have a tty */
static int notifier_add_vio(struct hvc_struct *hp, int data)
{
	struct port *port;

	port = find_port_by_vtermno(hp->vtermno);
	if (!port)
		return -EINVAL;

	hp->irq_requested = 1;
	resize_console(port);

	return 0;
}

static void notifier_del_vio(struct hvc_struct *hp, int data)
{
	hp->irq_requested = 0;
}

/* The operations for console ports. */
static const struct hv_ops hv_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_vio,
	.notifier_del = notifier_del_vio,
	.notifier_hangup = notifier_del_vio,
};

/*
 * Console drivers are initialized very early so boot messages can go
 * out, so we do things slightly differently from the generic virtio
 * initialization of the net and block drivers.
 *
 * At this stage, the console is output-only.  It's too early to set
 * up a virtqueue, so we let the drivers do some boutique early-output
 * thing.
 */
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
{
	early_put_chars = put_chars;
	return hvc_instantiate(0, 0, &hv_ops);
}

static int init_port_console(struct port *port)
{
	int ret;

	/*
	 * The Host's telling us this port is a console port.  Hook it
	 * up with an hvc console.
	 *
	 * To set up and manage our virtual console, we call
	 * hvc_alloc().
	 *
	 * The first argument of hvc_alloc() is the virtual console
	 * number.  The second argument is the parameter for the
	 * notification mechanism (like irq number).  We currently
	 * leave this as zero, virtqueues have implicit notifications.
	 *
	 * The third argument is a "struct hv_ops" containing the
	 * put_chars(), get_chars(), notifier_add() and notifier_del()
	 * pointers.  The final argument is the output buffer size: we
	 * can do any size, so we put PAGE_SIZE here.
	 */
	port->cons.vtermno = pdrvdata.next_vtermno;

	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		dev_err(port->dev,
			"error %d allocating hvc for port\n", ret);
		port->cons.hvc = NULL;
		return ret;
	}
	spin_lock_irq(&pdrvdata_lock);
	pdrvdata.next_vtermno++;
	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	spin_unlock_irq(&pdrvdata_lock);
	port->guest_connected = true;

	/*
	 * Start using the new console output if this is the first
	 * console to come up.
	 */
	if (early_put_chars)
		early_put_chars = NULL;

	/* Notify host of port being opened */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
}

static ssize_t show_port_name(struct device *dev,
			      struct device_attribute *attr, char *buffer)
{
	struct port *port;

	port = dev_get_drvdata(dev);

	return sprintf(buffer, "%s\n", port->name);
}

static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);

static struct attribute *port_sysfs_entries[] = {
	&dev_attr_name.attr,
	NULL
};

static struct attribute_group port_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = port_sysfs_entries,
};

static int debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
			    size_t count, loff_t *offp)
{
	struct port *port;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1024;
	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	port = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count,
			       "name: %s\n", port->name ? port->name : "");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "guest_connected: %d\n", port->guest_connected);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "host_connected: %d\n", port->host_connected);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "outvq_full: %d\n", port->outvq_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "is_console: %s\n",
			       is_console_port(port) ? "yes" : "no");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "console_vtermno: %u\n", port->cons.vtermno);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations port_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = debugfs_open,
	.read = debugfs_read,
};

static void set_console_size(struct port *port, u16 rows, u16 cols)
{
	if (!port || !is_console_port(port))
		return;

	port->cons.ws.ws_row = rows;
	port->cons.ws.ws_col = cols;
}

static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
	struct port_buffer *buf;
	unsigned int nr_added_bufs;
	int ret;

	nr_added_bufs = 0;
	do {
		buf = alloc_buf(PAGE_SIZE);
		if (!buf)
			break;

		spin_lock_irq(lock);
		ret = add_inbuf(vq, buf);
		if (ret < 0) {
			spin_unlock_irq(lock);
			free_buf(buf);
			break;
		}
		nr_added_bufs++;
		spin_unlock_irq(lock);
	} while (ret > 0);

	return nr_added_bufs;
}

static void send_sigio_to_port(struct port *port)
{
	if (port->async_queue && port->guest_connected)
		kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
}

static int add_port(struct ports_device *portdev, u32 id)
{
	char debugfs_name[16];
	struct port *port;
	struct port_buffer *buf;
	dev_t devt;
	unsigned int nr_added_bufs;
	int err;

	port = kmalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto fail;
	}
	kref_init(&port->kref);

	port->portdev = portdev;
	port->id = id;

	port->name = NULL;
	port->inbuf = NULL;
	port->cons.hvc = NULL;
	port->async_queue = NULL;

	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;

	port->host_connected = port->guest_connected = false;

	port->outvq_full = false;

	port->in_vq = portdev->in_vqs[port->id];
	port->out_vq = portdev->out_vqs[port->id];

	port->cdev = cdev_alloc();
	if (!port->cdev) {
		dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
		err = -ENOMEM;
		goto free_port;
	}
	port->cdev->ops = &port_fops;

	devt = MKDEV(portdev->chr_major, id);
	err = cdev_add(port->cdev, devt, 1);
	if (err < 0) {
		dev_err(&port->portdev->vdev->dev,
			"Error %d adding cdev for port %u\n", err, id);
		goto free_cdev;
	}
	port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
				  devt, port, "vport%up%u",
				  port->portdev->drv_index, id);
	if (IS_ERR(port->dev)) {
		err = PTR_ERR(port->dev);
		dev_err(&port->portdev->vdev->dev,
			"Error %d creating device for port %u\n",
			err, id);
		goto free_cdev;
	}

	spin_lock_init(&port->inbuf_lock);
	spin_lock_init(&port->outvq_lock);
	init_waitqueue_head(&port->waitqueue);

	/* Fill the in_vq with buffers so the host can send us data. */
	nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
	if (!nr_added_bufs) {
		dev_err(port->dev, "Error allocating inbufs\n");
		err = -ENOMEM;
		goto free_device;
	}

	/*
	 * If we're not using multiport support, this has to be a console port
	 */
	if (!use_multiport(port->portdev)) {
		err = init_port_console(port);
		if (err)
			goto free_inbufs;
	}

	spin_lock_irq(&portdev->ports_lock);
	list_add_tail(&port->list, &port->portdev->ports);
	spin_unlock_irq(&portdev->ports_lock);

	/*
	 * Tell the Host we're set so that it can send us various
	 * configuration parameters for this port (eg, port name,
	 * caching, whether this is a console port, etc.)
	 */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);

	if (pdrvdata.debugfs_dir) {
		/*
		 * Finally, create the debugfs file that we can use to
		 * inspect a port's state at any time
		 */
		sprintf(debugfs_name, "vport%up%u",
			port->portdev->drv_index, id);
		port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
							 pdrvdata.debugfs_dir,
							 port,
							 &port_debugfs_ops);
	}
	return 0;

free_inbufs:
	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
		free_buf(buf);
free_device:
	device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
	cdev_del(port->cdev);
free_port:
	kfree(port);
fail:
	/* The host might want to notify management sw about port add failure */
	__send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
	return err;
}

/* No users remain, remove all port-specific data. */
static void remove_port(struct kref *kref)
{
	struct port *port;

	port = container_of(kref, struct port, kref);

	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
	device_destroy(pdrvdata.class, port->dev->devt);
	cdev_del(port->cdev);

	kfree(port->name);

	debugfs_remove(port->debugfs_file);

	kfree(port);
}

/*
 * Port got unplugged.  Remove port from portdev's list and drop the
 * kref reference.  If no userspace has this port opened, it will
 * result in immediate removal of the port.
 */
static void unplug_port(struct port *port)
{
	struct port_buffer *buf;

	spin_lock_irq(&port->portdev->ports_lock);
	list_del(&port->list);
	spin_unlock_irq(&port->portdev->ports_lock);

	if (port->guest_connected) {
		port->guest_connected = false;
		port->host_connected = false;
		wake_up_interruptible(&port->waitqueue);

		/* Let the app know the port is going down. */
		send_sigio_to_port(port);
	}

	if (is_console_port(port)) {
		spin_lock_irq(&pdrvdata_lock);
		list_del(&port->cons.list);
		spin_unlock_irq(&pdrvdata_lock);
#if 0
		/*
		 * hvc_remove() not called as removing one hvc port
		 * results in other hvc ports getting frozen.
		 *
		 * Once this is resolved in hvc, this functionality
		 * will be enabled.  Till that is done, the -EPIPE
		 * return from get_chars() above will help
		 * hvc_console.c to clean up on ports we remove here.
		 */
		hvc_remove(port->cons.hvc);
#endif
	}

	/* Remove unused data this port might have received. */
	discard_port_data(port);

	reclaim_consumed_buffers(port);

	/* Remove buffers we queued up for the Host to send us data in. */
	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
		free_buf(buf);

	/*
	 * We should just assume the device itself has gone off --
	 * else a close on an open port later will try to send out a
	 * control message.
	 */
	port->portdev = NULL;

	/*
	 * Locks around here are not necessary - a port can't be
	 * opened after we removed the port struct from ports_list
	 * above.
	 */
	kref_put(&port->kref, remove_port);
}

/* Any private messages that the Host and Guest want to share */
static void handle_control_message(struct ports_device *portdev,
				   struct port_buffer *buf)
{
	struct virtio_console_control *cpkt;
	struct port *port;
	size_t name_size;
	int err;

	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);

	port = find_port_by_id(portdev, cpkt->id);
	if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
		/* The packet refers to a port we don't know about; drop it. */
		dev_dbg(&portdev->vdev->dev,
			"Invalid index %u in control packet\n", cpkt->id);
		return;
	}

	switch (cpkt->event) {
	case VIRTIO_CONSOLE_PORT_ADD:
		if (port) {
			dev_dbg(&portdev->vdev->dev,
				"Port %u already added\n", port->id);
			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
			break;
		}
		if (cpkt->id >= portdev->config.max_nr_ports) {
			dev_warn(&portdev->vdev->dev,
				"Request for adding port with out-of-bound id %u, max. supported id: %u\n",
				cpkt->id, portdev->config.max_nr_ports - 1);
			break;
		}
		add_port(portdev, cpkt->id);
		break;
	case VIRTIO_CONSOLE_PORT_REMOVE:
		unplug_port(port);
		break;
	case VIRTIO_CONSOLE_CONSOLE_PORT:
		if (!cpkt->value)
			break;
		if (is_console_port(port))
			break;

		init_port_console(port);
		/*
		 * Could remove the port here in case init fails - but
		 * have to notify the host first.
		 */
		break;
	case VIRTIO_CONSOLE_RESIZE: {
		struct {
			__u16 rows;
			__u16 cols;
		} size;

		if (!is_console_port(port))
			break;

		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
		       sizeof(size));
		set_console_size(port, size.rows, size.cols);

		port->cons.hvc->irq_requested = 1;
		resize_console(port);
		break;
	}
	case VIRTIO_CONSOLE_PORT_OPEN:
		port->host_connected = cpkt->value;
		wake_up_interruptible(&port->waitqueue);
		/*
		 * If the host port got closed and the host had any
		 * unconsumed buffers, we'll be able to reclaim them
		 * now.
		 */
		spin_lock_irq(&port->outvq_lock);
		reclaim_consumed_buffers(port);
		spin_unlock_irq(&port->outvq_lock);

		/*
		 * If the guest is connected, it'll be interested in
		 * knowing the host connection state changed.
		 */
		send_sigio_to_port(port);
		break;
	case VIRTIO_CONSOLE_PORT_NAME:
		/*
		 * Skip the size of the header and the cpkt to get the size
		 * of the name that was sent
		 */
		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;

		port->name = kmalloc(name_size, GFP_KERNEL);
		if (!port->name) {
			dev_err(port->dev,
				"Not enough space to store port name\n");
			break;
		}
		strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
			name_size - 1);
		port->name[name_size - 1] = 0;

		/*
		 * Since we only have one sysfs attribute, 'name',
		 * create it only if we have a name for the port.
		 */
		err = sysfs_create_group(&port->dev->kobj,
					 &port_attribute_group);
		if (err) {
			dev_err(port->dev,
				"Error %d creating sysfs device attributes\n",
				err);
		} else {
			/*
			 * Generate a udev event so that appropriate
			 * symlinks can be created based on udev
			 * rules.
			 */
			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
		}
		break;
	}
}

static void control_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;
	struct virtqueue *vq;
	struct port_buffer *buf;
	unsigned int len;

	portdev = container_of(work, struct ports_device, control_work);
	vq = portdev->c_ivq;

	spin_lock(&portdev->cvq_lock);
	while ((buf = virtqueue_get_buf(vq, &len))) {
		spin_unlock(&portdev->cvq_lock);

		buf->len = len;
		buf->offset = 0;

		handle_control_message(portdev, buf);

		spin_lock(&portdev->cvq_lock);
		if (add_inbuf(portdev->c_ivq, buf) < 0) {
			dev_warn(&portdev->vdev->dev,
				 "Error adding buffer to queue\n");
			free_buf(buf);
		}
	}
	spin_unlock(&portdev->cvq_lock);
}

static void out_intr(struct virtqueue *vq)
{
	struct port *port;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port)
		return;

	wake_up_interruptible(&port->waitqueue);
}

static void in_intr(struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port)
		return;

	spin_lock_irqsave(&port->inbuf_lock, flags);
	if (!port->inbuf)
		port->inbuf = get_inbuf(port);

	/*
	 * Don't queue up data when port is closed.  This condition
	 * can be reached when a console port is not yet connected (no
	 * tty is spawned) and the host sends out data to console
	 * ports.  For generic serial ports, the host won't
	 * (shouldn't) send data till the guest is connected.
	 */
	if (!port->guest_connected)
		discard_port_data(port);

	spin_unlock_irqrestore(&port->inbuf_lock, flags);

	wake_up_interruptible(&port->waitqueue);

	/* Send a SIGIO indicating new data in case the process asked for it */
	send_sigio_to_port(port);

	if (is_console_port(port) && hvc_poll(port->cons.hvc))
		hvc_kick();
}

static void control_intr(struct virtqueue *vq)
{
	struct ports_device *portdev;

	portdev = vq->vdev->priv;
	schedule_work(&portdev->control_work);
}

static void config_intr(struct virtio_device *vdev)
{
	struct ports_device *portdev;

	portdev = vdev->priv;

	if (!use_multiport(portdev)) {
		struct port *port;
		u16 rows, cols;

		vdev->config->get(vdev,
				  offsetof(struct virtio_console_config, cols),
				  &cols, sizeof(u16));
		vdev->config->get(vdev,
				  offsetof(struct virtio_console_config, rows),
				  &rows, sizeof(u16));

		port = find_port_by_id(portdev, 0);
		set_console_size(port, rows, cols);

		/*
		 * We'll use this way of resizing only for legacy
		 * support.  For newer userspace
		 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
		 * to indicate console size changes so that it can be
		 * done per-port.
		 */
		resize_console(port);
	}
}

static int init_vqs(struct ports_device *portdev)
{
	vq_callback_t **io_callbacks;
	char **io_names;
	struct virtqueue **vqs;
	u32 i, j, nr_ports, nr_queues;
	int err;

	nr_ports = portdev->config.max_nr_ports;
	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;

	vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
	io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
	io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
	portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
				  GFP_KERNEL);
	portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
				   GFP_KERNEL);
	if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
	    !portdev->out_vqs) {
		err = -ENOMEM;
		goto free;
	}

	/*
	 * For backward compat (newer host but older guest), the host
	 * spawns a console port first and also inits the vqs for port
	 * 0 before others.
	 */
	j = 0;
	io_callbacks[j] = in_intr;
	io_callbacks[j + 1] = out_intr;
	io_names[j] = "input";
	io_names[j + 1] = "output";
	j += 2;

	if (use_multiport(portdev)) {
		io_callbacks[j] = control_intr;
		io_callbacks[j + 1] = NULL;
		io_names[j] = "control-i";
		io_names[j + 1] = "control-o";

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			io_callbacks[j] = in_intr;
			io_callbacks[j + 1] = out_intr;
			io_names[j] = "input";
			io_names[j + 1] = "output";
		}
	}
	/* Find the queues. */
	err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
					      io_callbacks,
					      (const char **)io_names);
	if (err)
		goto free;

	j = 0;
	portdev->in_vqs[0] = vqs[0];
	portdev->out_vqs[0] = vqs[1];
	j += 2;
	if (use_multiport(portdev)) {
		portdev->c_ivq = vqs[j];
		portdev->c_ovq = vqs[j + 1];

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			portdev->in_vqs[i] = vqs[j];
			portdev->out_vqs[i] = vqs[j + 1];
		}
	}
	kfree(io_names);
	kfree(io_callbacks);
	kfree(vqs);

	return 0;

free:
	kfree(portdev->out_vqs);
	kfree(portdev->in_vqs);
	kfree(io_names);
	kfree(io_callbacks);
	kfree(vqs);

	return err;
}

static const struct file_operations portdev_fops = {
	.owner = THIS_MODULE,
};

/*
 * Once we're further in boot, we get probed like any other virtio
 * device.
 *
 * If the host also supports multiple console ports, we check the
 * config space to see how many ports the host has spawned.  We
 * initialize each port found.
 */
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	int err;
	bool multiport;

	portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
	if (!portdev) {
		err = -ENOMEM;
		goto fail;
	}

	/* Attach this portdev to this virtio_device, and vice-versa. */
	portdev->vdev = vdev;
	vdev->priv = portdev;

	spin_lock_irq(&pdrvdata_lock);
	portdev->drv_index = pdrvdata.index++;
	spin_unlock_irq(&pdrvdata_lock);

	portdev->chr_major = register_chrdev(0, "virtio-portsdev",
					     &portdev_fops);
	if (portdev->chr_major < 0) {
		dev_err(&vdev->dev,
			"Error %d registering chrdev for device %u\n",
			portdev->chr_major, portdev->drv_index);
		err = portdev->chr_major;
		goto free;
	}

	multiport = false;
	portdev->config.max_nr_ports = 1;
	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
		multiport = true;
		vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;

		vdev->config->get(vdev, offsetof(struct virtio_console_config,
						 max_nr_ports),
				  &portdev->config.max_nr_ports,
				  sizeof(portdev->config.max_nr_ports));
	}

	/* Let the Host know we support multiple ports. */
	vdev->config->finalize_features(vdev);

	err = init_vqs(portdev);
	if (err < 0) {
		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
		goto free_chrdev;
	}

	spin_lock_init(&portdev->ports_lock);
	INIT_LIST_HEAD(&portdev->ports);

	if (multiport) {
		unsigned int nr_added_bufs;

		spin_lock_init(&portdev->cvq_lock);
		INIT_WORK(&portdev->control_work, &control_work_handler);

		nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
		if (!nr_added_bufs) {
			dev_err(&vdev->dev,
				"Error allocating buffers for control queue\n");
			err = -ENOMEM;
			goto free_vqs;
		}
	} else {
		/*
		 * For backward compatibility: create a console port
		 * if we're running on an older host.
		 */
		add_port(portdev, 0);
	}

	spin_lock_irq(&pdrvdata_lock);
	list_add_tail(&portdev->list, &pdrvdata.portdevs);
	spin_unlock_irq(&pdrvdata_lock);

	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
			   VIRTIO_CONSOLE_DEVICE_READY, 1);
	return 0;

free_vqs:
	/* The host might want to notify mgmt sw about device add failure */
	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
			   VIRTIO_CONSOLE_DEVICE_READY, 0);
	vdev->config->del_vqs(vdev);
	kfree(portdev->in_vqs);
	kfree(portdev->out_vqs);
free_chrdev:
	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
	kfree(portdev);
fail:
	return err;
}

static void virtcons_remove(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	struct port *port, *port2;

	portdev = vdev->priv;

	spin_lock_irq(&pdrvdata_lock);
	list_del(&portdev->list);
	spin_unlock_irq(&pdrvdata_lock);

	/* Disable interrupts for vqs */
	vdev->config->reset(vdev);
	/* Finish up work that's lined up */
	cancel_work_sync(&portdev->control_work);

	list_for_each_entry_safe(port, port2, &portdev->ports, list)
		unplug_port(port);

	unregister_chrdev(portdev->chr_major, "virtio-portsdev");

	/*
	 * When yanking out a device, we immediately lose the
	 * (device-side) queues.  So there's no point in keeping the
	 * guest side around till we drop our final reference.  This
	 * also means that any ports which are in an open state will
	 * have to just stop using the port, as the vqs are going
	 * away.
	 */
	if (use_multiport(portdev)) {
		struct port_buffer *buf;
		unsigned int len;

		while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
			free_buf(buf);

		while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
			free_buf(buf);
	}

	vdev->config->del_vqs(vdev);
	kfree(portdev->in_vqs);
	kfree(portdev->out_vqs);

	kfree(portdev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_CONSOLE_F_SIZE,
	VIRTIO_CONSOLE_F_MULTIPORT,
};

static struct virtio_driver virtio_console = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtcons_probe,
	.remove = virtcons_remove,
	.config_changed = config_intr,
};

static int __init init(void)
{
	int err;

	pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
	if (IS_ERR(pdrvdata.class)) {
		err = PTR_ERR(pdrvdata.class);
		pr_err("Error %d creating virtio-ports class\n", err);
		return err;
	}

	pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
	if (!pdrvdata.debugfs_dir) {
		pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
			   PTR_ERR(pdrvdata.debugfs_dir));
	}
	INIT_LIST_HEAD(&pdrvdata.consoles);
	INIT_LIST_HEAD(&pdrvdata.portdevs);

	return register_virtio_driver(&virtio_console);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_console);

	class_destroy(pdrvdata.class);
	if (pdrvdata.debugfs_dir)
		debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");