/*
 * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
 * Copyright (C) 2009, 2010 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_console.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "hvc_console.h"

/*
 * This is a global struct for storing common data for all the devices
 * this driver handles.
 *
 * Mainly, it has a linked list for all the consoles in one place so
 * that callbacks from hvc for get_chars(), put_chars() work properly
 * across multiple devices and multiple ports per device.
 */
struct ports_driver_data {
	/* Used for registering chardevs */
	struct class *class;

	/* Used for exporting per-port information to debugfs */
	struct dentry *debugfs_dir;

	/* List of all the devices we're handling */
	struct list_head portdevs;

	/* Number of devices this driver is handling */
	unsigned int index;

	/*
	 * This is used to keep track of the number of hvc consoles
	 * spawned by this driver.  This number is given as the first
	 * argument to hvc_alloc().  To correctly map an initial
	 * console spawned via hvc_instantiate to the console being
	 * hooked up via hvc_alloc, we need to pass the same vtermno.
	 *
	 * We also just assume the first console being initialised was
	 * the first one that got used as the initial console.
	 */
	unsigned int next_vtermno;

	/* All the console devices handled by this driver */
	struct list_head consoles;
};
static struct ports_driver_data pdrvdata;

DEFINE_SPINLOCK(pdrvdata_lock);

/* This struct holds information that's relevant only for console ports */
struct console {
	/* We'll place all consoles in a list in the pdrvdata struct */
	struct list_head list;

	/* The hvc device associated with this console port */
	struct hvc_struct *hvc;

	/* The size of the console */
	struct winsize ws;

	/*
	 * This number identifies the number that we used to register
	 * with hvc in hvc_instantiate() and hvc_alloc(); this is the
	 * number passed on by the hvc callbacks to us to
	 * differentiate between the other console ports handled by
	 * this driver
	 */
	u32 vtermno;
};

struct port_buffer {
	char *buf;

	/* size of the buffer in *buf above */
	size_t size;

	/* used length of the buffer */
	size_t len;
	/* offset in the buf from which to consume data */
	size_t offset;
};

/*
 * This is a per-device struct that stores data common to all the
 * ports for that device (vdev->priv).
 */
struct ports_device {
	/* Next portdev in the list, head is in the pdrvdata struct */
	struct list_head list;

	/*
	 * Workqueue handlers where we process deferred work after
	 * notification
	 */
	struct work_struct control_work;

	struct list_head ports;

	/* To protect the list of ports */
	spinlock_t ports_lock;

	/* To protect the vq operations for the control channel */
	spinlock_t cvq_lock;

	/* The current config space is stored here */
	struct virtio_console_config config;

	/* The virtio device we're associated with */
	struct virtio_device *vdev;

	/*
	 * A couple of virtqueues for the control channel: one for
	 * guest->host transfers, one for host->guest transfers
	 */
	struct virtqueue *c_ivq, *c_ovq;

	/* Array of per-port IO virtqueues */
	struct virtqueue **in_vqs, **out_vqs;

	/* Used for numbering devices for sysfs and debugfs */
	unsigned int drv_index;

	/* Major number for this device.  Ports will be created as minors. */
	int chr_major;
};

/* This struct holds the per-port data */
struct port {
	/* Next port in the list, head is in the ports_device */
	struct list_head list;

	/* Pointer to the parent virtio_console device */
	struct ports_device *portdev;

	/* The current buffer from which data has to be fed to readers */
	struct port_buffer *inbuf;

	/*
	 * To protect the operations on the in_vq associated with this
	 * port.  Has to be a spinlock because it can be called from
	 * interrupt context (get_char()).
	 */
	spinlock_t inbuf_lock;

	/* Protect the operations on the out_vq. */
	spinlock_t outvq_lock;

	/* The IO vqs for this port */
	struct virtqueue *in_vq, *out_vq;

	/* File in the debugfs directory that exposes this port's information */
	struct dentry *debugfs_file;

	/*
	 * The entries in this struct will be valid if this port is
	 * hooked up to an hvc console
	 */
	struct console cons;

	/* Each port associates with a separate char device */
	struct cdev *cdev;
	struct device *dev;

	/* Reference-counting to handle port hot-unplugs and file operations */
	struct kref kref;

	/* A waitqueue for poll() or blocking read operations */
	wait_queue_head_t waitqueue;

	/* The 'name' of the port that we expose via sysfs properties */
	char *name;

	/* We can notify apps of host connect / disconnect events via SIGIO */
	struct fasync_struct *async_queue;

	/* The 'id' to identify the port with the Host */
	u32 id;

	bool outvq_full;

	/* Is the host device open */
	bool host_connected;

	/* We should allow only one process to open a port */
	bool guest_connected;
};

/* This is the very early arch-specified put chars function. */
static int (*early_put_chars)(u32, const char *, int);

static struct port *find_port_by_vtermno(u32 vtermno)
{
	struct port *port;
	struct console *cons;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(cons, &pdrvdata.consoles, list) {
		if (cons->vtermno == vtermno) {
			port = container_of(cons, struct port, cons);
			goto out;
		}
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}

static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
						 dev_t dev)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->cdev->dev == dev)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}

static struct port *find_port_by_devt(dev_t dev)
{
	struct ports_device *portdev;
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&pdrvdata_lock, flags);
	list_for_each_entry(portdev, &pdrvdata.portdevs, list) {
		port = find_port_by_devt_in_portdev(portdev, dev);
		if (port)
			goto out;
	}
	port = NULL;
out:
	spin_unlock_irqrestore(&pdrvdata_lock, flags);
	return port;
}

static struct port *find_port_by_id(struct ports_device *portdev, u32 id)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->id == id)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);

	return port;
}

static struct port *find_port_by_vq(struct ports_device *portdev,
				    struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	spin_lock_irqsave(&portdev->ports_lock, flags);
	list_for_each_entry(port, &portdev->ports, list)
		if (port->in_vq == vq || port->out_vq == vq)
			goto out;
	port = NULL;
out:
	spin_unlock_irqrestore(&portdev->ports_lock, flags);
	return port;
}

static bool is_console_port(struct port *port)
{
	if (port->cons.hvc)
		return true;
	return false;
}

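/*
 * Multiport support is negotiated in virtcons_probe(): if the host
 * offers VIRTIO_CONSOLE_F_MULTIPORT, that bit is set in
 * vdev->features[0] before finalize_features().  The helper below
 * simply re-checks the negotiated bit, so the rest of the driver can
 * branch on "multiport or legacy single-console".
 */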
static inline bool use_multiport(struct ports_device *portdev)
{
	/*
	 * This condition can be true when put_chars is called from
	 * early_init
	 */
	if (!portdev->vdev)
		return false;
	return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
}

static void free_buf(struct port_buffer *buf)
{
	kfree(buf->buf);
	kfree(buf);
}

static struct port_buffer *alloc_buf(size_t buf_size)
{
	struct port_buffer *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		goto fail;
	buf->buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf->buf)
		goto free_buf;
	buf->len = 0;
	buf->offset = 0;
	buf->size = buf_size;
	return buf;

free_buf:
	kfree(buf);
fail:
	return NULL;
}

/* Callers should take appropriate locks */
static void *get_inbuf(struct port *port)
{
	struct port_buffer *buf;
	struct virtqueue *vq;
	unsigned int len;

	vq = port->in_vq;
	buf = virtqueue_get_buf(vq, &len);
	if (buf) {
		buf->len = len;
		buf->offset = 0;
	}
	return buf;
}

/*
 * Create a scatter-gather list representing our input buffer and put
 * it in the queue.
 *
 * Callers should take appropriate locks.
 */
static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
{
	struct scatterlist sg[1];
	int ret;

	sg_init_one(sg, buf->buf, buf->size);

	ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
	virtqueue_kick(vq);
	return ret;
}

/* Discard any unread data this port has.  Callers should take appropriate locks. */
static void discard_port_data(struct port *port)
{
	struct port_buffer *buf;
	struct virtqueue *vq;
	unsigned int len;
	int ret;

	vq = port->in_vq;
	if (port->inbuf)
		buf = port->inbuf;
	else
		buf = virtqueue_get_buf(vq, &len);

	ret = 0;
	while (buf) {
		if (add_inbuf(vq, buf) < 0) {
			ret++;
			free_buf(buf);
		}
		buf = virtqueue_get_buf(vq, &len);
	}
	port->inbuf = NULL;
	if (ret)
		dev_warn(port->dev, "Errors adding %d buffers back to vq\n",
			 ret);
}

static bool port_has_data(struct port *port)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&port->inbuf_lock, flags);
	if (port->inbuf) {
		ret = true;
		goto out;
	}
	port->inbuf = get_inbuf(port);
	if (port->inbuf) {
		ret = true;
		goto out;
	}
	ret = false;
out:
	spin_unlock_irqrestore(&port->inbuf_lock, flags);
	return ret;
}

static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
				  unsigned int event, unsigned int value)
{
	struct scatterlist sg[1];
	struct virtio_console_control cpkt;
	struct virtqueue *vq;
	unsigned int len;

	if (!use_multiport(portdev))
		return 0;

	cpkt.id = port_id;
	cpkt.event = event;
	cpkt.value = value;

	vq = portdev->c_ovq;

	sg_init_one(sg, &cpkt, sizeof(cpkt));
	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
		virtqueue_kick(vq);
		while (!virtqueue_get_buf(vq, &len))
			cpu_relax();
	}
	return 0;
}

static ssize_t send_control_msg(struct port *port, unsigned int event,
				unsigned int value)
{
	/* Did the port get unplugged before userspace closed it? */
	if (port->portdev)
		return __send_control_msg(port->portdev, port->id, event, value);
	return 0;
}

/* Callers must take the port->outvq_lock */
static void reclaim_consumed_buffers(struct port *port)
{
	void *buf;
	unsigned int len;

	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
		kfree(buf);
		port->outvq_full = false;
	}
}

static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
			bool nonblock)
{
	struct scatterlist sg[1];
	struct virtqueue *out_vq;
	ssize_t ret;
	unsigned long flags;
	unsigned int len;

	out_vq = port->out_vq;

	spin_lock_irqsave(&port->outvq_lock, flags);

	reclaim_consumed_buffers(port);

	sg_init_one(sg, in_buf, in_count);
	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);

	/* Tell Host to go! */
	virtqueue_kick(out_vq);

	if (ret < 0) {
		in_count = 0;
		goto done;
	}

	if (ret == 0)
		port->outvq_full = true;

	if (nonblock)
		goto done;

	/*
	 * Wait till the host acknowledges it pushed out the data we
	 * sent.  This is done for data from the hvc_console; the tty
	 * operations are performed with spinlocks held so we can't
	 * sleep here.  An alternative would be to copy the data to a
	 * buffer and relax the spinning requirement.  The downside is
	 * we need to kmalloc a GFP_ATOMIC buffer each time the
	 * console driver writes something out.
	 */
	while (!virtqueue_get_buf(out_vq, &len))
		cpu_relax();
done:
	spin_unlock_irqrestore(&port->outvq_lock, flags);
	/*
	 * We're expected to return the amount of data we wrote -- all
	 * of it
	 */
	return in_count;
}

/*
 * Give out the data that's requested from the buffer that we have
 * queued up.
 */
static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
			    bool to_user)
{
	struct port_buffer *buf;
	unsigned long flags;

	if (!out_count || !port_has_data(port))
		return 0;

	buf = port->inbuf;
	out_count = min(out_count, buf->len - buf->offset);

	if (to_user) {
		ssize_t ret;

		ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
		if (ret)
			return -EFAULT;
	} else {
		memcpy(out_buf, buf->buf + buf->offset, out_count);
	}

	buf->offset += out_count;

	if (buf->offset == buf->len) {
		/*
		 * We're done using all the data in this buffer.
		 * Re-queue so that the Host can send us more data.
		 */
		spin_lock_irqsave(&port->inbuf_lock, flags);
		port->inbuf = NULL;

		if (add_inbuf(port->in_vq, buf) < 0)
			dev_warn(port->dev, "failed add_buf\n");

		spin_unlock_irqrestore(&port->inbuf_lock, flags);
	}
	/* Return the number of bytes actually copied */
	return out_count;
}

/* The condition that must be true for polling to end */
static bool will_read_block(struct port *port)
{
	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	return !port_has_data(port) && port->host_connected;
}

static bool will_write_block(struct port *port)
{
	bool ret;

	if (!port->guest_connected) {
		/* Port got hot-unplugged. Let's exit. */
		return false;
	}
	if (!port->host_connected)
		return true;

	spin_lock_irq(&port->outvq_lock);
	/*
	 * Check if the Host has consumed any buffers since we last
	 * sent data (this is only applicable for nonblocking ports).
	 */
	reclaim_consumed_buffers(port);
	ret = port->outvq_full;
	spin_unlock_irq(&port->outvq_lock);

	return ret;
}

static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
			      size_t count, loff_t *offp)
{
	struct port *port;
	ssize_t ret;

	port = filp->private_data;

	if (!port_has_data(port)) {
		/*
		 * If nothing's connected on the host just return 0 in
		 * case of list_empty; this tells the userspace app
		 * that there's no connection
		 */
		if (!port->host_connected)
			return 0;
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(port->waitqueue,
					       !will_read_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;
	/*
	 * We could've received a disconnection message while we were
	 * waiting for more data.
	 *
	 * This check is not clubbed in the if() statement above as we
	 * might receive some data as well as the host could get
	 * disconnected after we got woken up from our wait.  So we
	 * really want to give off whatever data we have and only then
	 * check for host_connected.
	 */
	if (!port_has_data(port) && !port->host_connected)
		return 0;

	return fill_readbuf(port, ubuf, count, true);
}

static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
			       size_t count, loff_t *offp)
{
	struct port *port;
	char *buf;
	ssize_t ret;
	bool nonblock;

	/* Userspace could be out to fool us */
	if (!count)
		return 0;

	port = filp->private_data;

	nonblock = filp->f_flags & O_NONBLOCK;

	if (will_write_block(port)) {
		if (nonblock)
			return -EAGAIN;

		ret = wait_event_interruptible(port->waitqueue,
					       !will_write_block(port));
		if (ret < 0)
			return ret;
	}
	/* Port got hot-unplugged. */
	if (!port->guest_connected)
		return -ENODEV;

	count = min((size_t)(32 * 1024), count);

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = copy_from_user(buf, ubuf, count);
	if (ret) {
		ret = -EFAULT;
		goto free_buf;
	}

	/*
	 * We now ask send_buf() to not spin for generic ports -- we
	 * can re-use the same code path that non-blocking file
	 * descriptors take for blocking file descriptors since the
	 * wait is already done and we're certain the write will go
	 * through to the host.
	 */
	nonblock = true;
	ret = send_buf(port, buf, count, nonblock);

	if (nonblock && ret > 0)
		goto out;

free_buf:
	kfree(buf);
out:
	return ret;
}

static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
{
	struct port *port;
	unsigned int ret;

	port = filp->private_data;
	poll_wait(filp, &port->waitqueue, wait);

	if (!port->guest_connected) {
		/* Port got unplugged */
		return POLLHUP;
	}
	ret = 0;
	if (!will_read_block(port))
		ret |= POLLIN | POLLRDNORM;
	if (!will_write_block(port))
		ret |= POLLOUT;
	if (!port->host_connected)
		ret |= POLLHUP;

	return ret;
}

static void remove_port(struct kref *kref);

static int port_fops_release(struct inode *inode, struct file *filp)
{
	struct port *port;

	port = filp->private_data;

	/* Notify host of port being closed */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0);

	spin_lock_irq(&port->inbuf_lock);
	port->guest_connected = false;

	discard_port_data(port);

	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	/*
	 * Locks aren't necessary here as a port can't be opened after
	 * unplug, and if a port isn't unplugged, a kref would already
	 * exist for the port.  Plus, taking ports_lock here would
	 * create a dependency on other locks taken by functions
	 * inside remove_port if we're the last holder of the port,
	 * creating many problems.
	 */
	kref_put(&port->kref, remove_port);

	return 0;
}

static int port_fops_open(struct inode *inode, struct file *filp)
{
	struct cdev *cdev = inode->i_cdev;
	struct port *port;
	int ret;

	port = find_port_by_devt(cdev->dev);
	filp->private_data = port;

	/* Prevent against a port getting hot-unplugged at the same time */
	spin_lock_irq(&port->portdev->ports_lock);
	kref_get(&port->kref);
	spin_unlock_irq(&port->portdev->ports_lock);

	/*
	 * Don't allow opening of console port devices -- that's done
	 * via /dev/hvc
	 */
	if (is_console_port(port)) {
		ret = -ENXIO;
		goto out;
	}

	/* Allow only one process to open a particular port at a time */
	spin_lock_irq(&port->inbuf_lock);
	if (port->guest_connected) {
		spin_unlock_irq(&port->inbuf_lock);
		ret = -EMFILE;
		goto out;
	}

	port->guest_connected = true;
	spin_unlock_irq(&port->inbuf_lock);

	spin_lock_irq(&port->outvq_lock);
	/*
	 * There might be a chance that we missed reclaiming a few
	 * buffers in the window of the port getting previously closed
	 * and opening now.
	 */
	reclaim_consumed_buffers(port);
	spin_unlock_irq(&port->outvq_lock);

	nonseekable_open(inode, filp);

	/* Notify host of port being opened */
	send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
out:
	kref_put(&port->kref, remove_port);
	return ret;
}

static int port_fops_fasync(int fd, struct file *filp, int mode)
{
	struct port *port;

	port = filp->private_data;
	return fasync_helper(fd, filp, mode, &port->async_queue);
}

/*
 * The file operations that we support: programs in the guest can open
 * a console device, read from it, write to it, poll for data and
 * close it.  The devices are at
 * /dev/vport<device number>p<port number>
 */
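/*
 * For illustration only (not part of this driver and not compiled):
 * a guest userspace program would typically talk to one of these
 * ports roughly like this, with the device path depending on which
 * device/port numbers the host configured:
 *
 *	int fd = open("/dev/vport0p1", O_RDWR); // fails with ENXIO for console ports
 *	write(fd, "hello host\n", 11);          // queued on the port's out_vq
 *	char buf[128];
 *	ssize_t n = read(fd, buf, sizeof(buf)); // 0 means the host side isn't connected
 *	close(fd);                              // host is told the port was closed
 *
 * Blocking, O_NONBLOCK, poll() and SIGIO semantics follow from
 * will_read_block(), will_write_block() and port_fops_poll() above.
 */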
static const struct file_operations port_fops = {
	.owner = THIS_MODULE,
	.open = port_fops_open,
	.read = port_fops_read,
	.write = port_fops_write,
	.poll = port_fops_poll,
	.release = port_fops_release,
	.fasync = port_fops_fasync,
	.llseek = no_llseek,
};

/*
 * The put_chars() callback is pretty straightforward.
 *
 * We turn the characters into a scatter-gather list, add it to the
 * output queue and then kick the Host.  Then we sit here waiting for
 * it to finish: inefficient in theory, but in practice
 * implementations will do it immediately (lguest's Launcher does).
 */
static int put_chars(u32 vtermno, const char *buf, int count)
{
	struct port *port;

	if (unlikely(early_put_chars))
		return early_put_chars(vtermno, buf, count);

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	return send_buf(port, (void *)buf, count, false);
}

/*
 * get_chars() is the callback from the hvc_console infrastructure
 * when an interrupt is received.
 *
 * We call out to fill_readbuf that gets us the required data from the
 * buffers that are queued up.
 */
static int get_chars(u32 vtermno, char *buf, int count)
{
	struct port *port;

	/* If we've not set up the port yet, we have no input to give. */
	if (unlikely(early_put_chars))
		return 0;

	port = find_port_by_vtermno(vtermno);
	if (!port)
		return -EPIPE;

	/* If we don't have an input queue yet, we can't get input. */
	BUG_ON(!port->in_vq);

	return fill_readbuf(port, buf, count, false);
}

static void resize_console(struct port *port)
{
	struct virtio_device *vdev;

	/* The port could have been hot-unplugged */
	if (!port || !is_console_port(port))
		return;

	vdev = port->portdev->vdev;
	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
		hvc_resize(port->cons.hvc, port->cons.ws);
}

/* We set the configuration at this point, since we now have a tty */
static int notifier_add_vio(struct hvc_struct *hp, int data)
{
	struct port *port;

	port = find_port_by_vtermno(hp->vtermno);
	if (!port)
		return -EINVAL;

	hp->irq_requested = 1;
	resize_console(port);

	return 0;
}

static void notifier_del_vio(struct hvc_struct *hp, int data)
{
	hp->irq_requested = 0;
}

/* The operations for console ports. */
static const struct hv_ops hv_ops = {
	.get_chars = get_chars,
	.put_chars = put_chars,
	.notifier_add = notifier_add_vio,
	.notifier_del = notifier_del_vio,
	.notifier_hangup = notifier_del_vio,
};

/*
 * Console drivers are initialized very early so boot messages can go
 * out, so we do things slightly differently from the generic virtio
 * initialization of the net and block drivers.
 *
 * At this stage, the console is output-only.  It's too early to set
 * up a virtqueue, so we let the drivers do some boutique early-output
 * thing.
 */
int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
{
	early_put_chars = put_chars;
	return hvc_instantiate(0, 0, &hv_ops);
}

static int init_port_console(struct port *port)
{
	int ret;

	/*
	 * The Host's telling us this port is a console port.  Hook it
	 * up with an hvc console.
	 *
	 * To set up and manage our virtual console, we call
	 * hvc_alloc().
	 *
	 * The first argument of hvc_alloc() is the virtual console
	 * number.  The second argument is the parameter for the
	 * notification mechanism (like irq number).  We currently
	 * leave this as zero: virtqueues have implicit notifications.
	 *
	 * The third argument is a "struct hv_ops" containing the
	 * put_chars(), get_chars(), notifier_add() and notifier_del()
	 * pointers.  The final argument is the output buffer size: we
	 * can do any size, so we put PAGE_SIZE here.
	 */
	port->cons.vtermno = pdrvdata.next_vtermno;

	port->cons.hvc = hvc_alloc(port->cons.vtermno, 0, &hv_ops, PAGE_SIZE);
	if (IS_ERR(port->cons.hvc)) {
		ret = PTR_ERR(port->cons.hvc);
		dev_err(port->dev,
			"error %d allocating hvc for port\n", ret);
		port->cons.hvc = NULL;
		return ret;
	}
	spin_lock_irq(&pdrvdata_lock);
	pdrvdata.next_vtermno++;
	list_add_tail(&port->cons.list, &pdrvdata.consoles);
	spin_unlock_irq(&pdrvdata_lock);
	port->guest_connected = true;

	/*
	 * Start using the new console output if this is the first
	 * console to come up.
	 */
	if (early_put_chars)
		early_put_chars = NULL;

	/* Notify host of port being opened */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 1);

	return 0;
}

static ssize_t show_port_name(struct device *dev,
			      struct device_attribute *attr, char *buffer)
{
	struct port *port;

	port = dev_get_drvdata(dev);

	return sprintf(buffer, "%s\n", port->name);
}

static DEVICE_ATTR(name, S_IRUGO, show_port_name, NULL);

static struct attribute *port_sysfs_entries[] = {
	&dev_attr_name.attr,
	NULL
};

static struct attribute_group port_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = port_sysfs_entries,
};

static int debugfs_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t debugfs_read(struct file *filp, char __user *ubuf,
			    size_t count, loff_t *offp)
{
	struct port *port;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1024;
	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	port = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count,
			       "name: %s\n", port->name ? port->name : "");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "guest_connected: %d\n", port->guest_connected);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "host_connected: %d\n", port->host_connected);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "outvq_full: %d\n", port->outvq_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "is_console: %s\n",
			       is_console_port(port) ? "yes" : "no");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "console_vtermno: %u\n", port->cons.vtermno);

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations port_debugfs_ops = {
	.owner = THIS_MODULE,
	.open = debugfs_open,
	.read = debugfs_read,
};

static void set_console_size(struct port *port, u16 rows, u16 cols)
{
	if (!port || !is_console_port(port))
		return;

	port->cons.ws.ws_row = rows;
	port->cons.ws.ws_col = cols;
}

static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
{
	struct port_buffer *buf;
	unsigned int nr_added_bufs;
	int ret;

	nr_added_bufs = 0;
	do {
		buf = alloc_buf(PAGE_SIZE);
		if (!buf)
			break;

		spin_lock_irq(lock);
		ret = add_inbuf(vq, buf);
		if (ret < 0) {
			spin_unlock_irq(lock);
			free_buf(buf);
			break;
		}
		nr_added_bufs++;
		spin_unlock_irq(lock);
	} while (ret > 0);

	return nr_added_bufs;
}

static void send_sigio_to_port(struct port *port)
{
	if (port->async_queue && port->guest_connected)
		kill_fasync(&port->async_queue, SIGIO, POLL_OUT);
}

static int add_port(struct ports_device *portdev, u32 id)
{
	char debugfs_name[16];
	struct port *port;
	struct port_buffer *buf;
	dev_t devt;
	unsigned int nr_added_bufs;
	int err;

	port = kmalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		err = -ENOMEM;
		goto fail;
	}
	kref_init(&port->kref);

	port->portdev = portdev;
	port->id = id;

	port->name = NULL;
	port->inbuf = NULL;
	port->cons.hvc = NULL;
	port->async_queue = NULL;

	port->cons.ws.ws_row = port->cons.ws.ws_col = 0;

	port->host_connected = port->guest_connected = false;

	port->outvq_full = false;

	port->in_vq = portdev->in_vqs[port->id];
	port->out_vq = portdev->out_vqs[port->id];

	port->cdev = cdev_alloc();
	if (!port->cdev) {
		dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n");
		err = -ENOMEM;
		goto free_port;
	}
	port->cdev->ops = &port_fops;

	devt = MKDEV(portdev->chr_major, id);
	err = cdev_add(port->cdev, devt, 1);
	if (err < 0) {
		dev_err(&port->portdev->vdev->dev,
			"Error %d adding cdev for port %u\n", err, id);
		goto free_cdev;
	}
	port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev,
				  devt, port, "vport%up%u",
				  port->portdev->drv_index, id);
	if (IS_ERR(port->dev)) {
		err = PTR_ERR(port->dev);
		dev_err(&port->portdev->vdev->dev,
			"Error %d creating device for port %u\n",
			err, id);
		goto free_cdev;
	}

	spin_lock_init(&port->inbuf_lock);
	spin_lock_init(&port->outvq_lock);
	init_waitqueue_head(&port->waitqueue);

	/* Fill the in_vq with buffers so the host can send us data. */
	nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
	if (!nr_added_bufs) {
		dev_err(port->dev, "Error allocating inbufs\n");
		err = -ENOMEM;
		goto free_device;
	}

	/*
	 * If we're not using multiport support, this has to be a console port
	 */
	if (!use_multiport(port->portdev)) {
		err = init_port_console(port);
		if (err)
			goto free_inbufs;
	}

	spin_lock_irq(&portdev->ports_lock);
	list_add_tail(&port->list, &port->portdev->ports);
	spin_unlock_irq(&portdev->ports_lock);

	/*
	 * Tell the Host we're set so that it can send us various
	 * configuration parameters for this port (eg, port name,
	 * caching, whether this is a console port, etc.)
	 */
	send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);

	if (pdrvdata.debugfs_dir) {
		/*
		 * Finally, create the debugfs file that we can use to
		 * inspect a port's state at any time
		 */
		sprintf(debugfs_name, "vport%up%u",
			port->portdev->drv_index, id);
		port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
							 pdrvdata.debugfs_dir,
							 port,
							 &port_debugfs_ops);
	}
	return 0;

free_inbufs:
	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
		free_buf(buf);
free_device:
	device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
	cdev_del(port->cdev);
free_port:
	kfree(port);
fail:
	/* The host might want to notify management sw about port add failure */
	__send_control_msg(portdev, id, VIRTIO_CONSOLE_PORT_READY, 0);
	return err;
}

/* No users remain, remove all port-specific data. */
static void remove_port(struct kref *kref)
{
	struct port *port;

	port = container_of(kref, struct port, kref);

	sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
	device_destroy(pdrvdata.class, port->dev->devt);
	cdev_del(port->cdev);

	kfree(port->name);

	debugfs_remove(port->debugfs_file);

	kfree(port);
}

/*
 * Port got unplugged.  Remove port from portdev's list and drop the
 * kref reference.  If no userspace has this port opened, it will
 * result in immediate removal of the port.
 */
static void unplug_port(struct port *port)
{
	struct port_buffer *buf;

	spin_lock_irq(&port->portdev->ports_lock);
	list_del(&port->list);
	spin_unlock_irq(&port->portdev->ports_lock);

	if (port->guest_connected) {
		port->guest_connected = false;
		port->host_connected = false;
		wake_up_interruptible(&port->waitqueue);

		/* Let the app know the port is going down. */
		send_sigio_to_port(port);
	}

	if (is_console_port(port)) {
		spin_lock_irq(&pdrvdata_lock);
		list_del(&port->cons.list);
		spin_unlock_irq(&pdrvdata_lock);
#if 0
		/*
		 * hvc_remove() not called as removing one hvc port
		 * results in other hvc ports getting frozen.
		 *
		 * Once this is resolved in hvc, this functionality
		 * will be enabled.  Till that is done, the -EPIPE
		 * return from get_chars() above will help
		 * hvc_console.c to clean up on ports we remove here.
		 */
		hvc_remove(port->cons.hvc);
#endif
	}

	/* Remove unused data this port might have received. */
	discard_port_data(port);

	reclaim_consumed_buffers(port);

	/* Remove buffers we queued up for the Host to send us data in. */
	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
		free_buf(buf);

	/*
	 * We should just assume the device itself has gone off --
	 * else a close on an open port later will try to send out a
	 * control message.
	 */
	port->portdev = NULL;

	/*
	 * Locks around here are not necessary - a port can't be
	 * opened after we removed the port struct from ports_list
	 * above.
	 */
	kref_put(&port->kref, remove_port);
}

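/*
 * A control message, as handled below, starts with a
 * struct virtio_console_control header (id, event, value) and may be
 * followed by an event-specific payload in the same buffer: a pair of
 * __u16 rows/cols for VIRTIO_CONSOLE_RESIZE, or the port name bytes
 * (not necessarily NUL-terminated) for VIRTIO_CONSOLE_PORT_NAME.
 */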
/* Any private messages that the Host and Guest want to share */
static void handle_control_message(struct ports_device *portdev,
				   struct port_buffer *buf)
{
	struct virtio_console_control *cpkt;
	struct port *port;
	size_t name_size;
	int err;

	cpkt = (struct virtio_console_control *)(buf->buf + buf->offset);

	port = find_port_by_id(portdev, cpkt->id);
	if (!port && cpkt->event != VIRTIO_CONSOLE_PORT_ADD) {
		/* No valid header at start of buffer.  Drop it. */
		dev_dbg(&portdev->vdev->dev,
			"Invalid index %u in control packet\n", cpkt->id);
		return;
	}

	switch (cpkt->event) {
	case VIRTIO_CONSOLE_PORT_ADD:
		if (port) {
			dev_dbg(&portdev->vdev->dev,
				"Port %u already added\n", port->id);
			send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
			break;
		}
		if (cpkt->id >= portdev->config.max_nr_ports) {
			dev_warn(&portdev->vdev->dev,
				 "Request for adding port with out-of-bound id %u, max. supported id: %u\n",
				 cpkt->id, portdev->config.max_nr_ports - 1);
			break;
		}
		add_port(portdev, cpkt->id);
		break;
	case VIRTIO_CONSOLE_PORT_REMOVE:
		unplug_port(port);
		break;
	case VIRTIO_CONSOLE_CONSOLE_PORT:
		if (!cpkt->value)
			break;
		if (is_console_port(port))
			break;

		init_port_console(port);
		/*
		 * Could remove the port here in case init fails - but
		 * have to notify the host first.
		 */
		break;
	case VIRTIO_CONSOLE_RESIZE: {
		struct {
			__u16 rows;
			__u16 cols;
		} size;

		if (!is_console_port(port))
			break;

		memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt),
		       sizeof(size));
		set_console_size(port, size.rows, size.cols);

		port->cons.hvc->irq_requested = 1;
		resize_console(port);
		break;
	}
	case VIRTIO_CONSOLE_PORT_OPEN:
		port->host_connected = cpkt->value;
		wake_up_interruptible(&port->waitqueue);
		/*
		 * If the host port got closed and the host had any
		 * unconsumed buffers, we'll be able to reclaim them
		 * now.
		 */
		spin_lock_irq(&port->outvq_lock);
		reclaim_consumed_buffers(port);
		spin_unlock_irq(&port->outvq_lock);

		/*
		 * If the guest is connected, it'll be interested in
		 * knowing the host connection state changed.
		 */
		send_sigio_to_port(port);
		break;
	case VIRTIO_CONSOLE_PORT_NAME:
		/*
		 * Skip the size of the header and the cpkt to get the size
		 * of the name that was sent
		 */
		name_size = buf->len - buf->offset - sizeof(*cpkt) + 1;

		port->name = kmalloc(name_size, GFP_KERNEL);
		if (!port->name) {
			dev_err(port->dev,
				"Not enough space to store port name\n");
			break;
		}
		strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
			name_size - 1);
		port->name[name_size - 1] = 0;

		/*
		 * Since we only have one sysfs attribute, 'name',
		 * create it only if we have a name for the port.
		 */
		err = sysfs_create_group(&port->dev->kobj,
					 &port_attribute_group);
		if (err) {
			dev_err(port->dev,
				"Error %d creating sysfs device attributes\n",
				err);
		} else {
			/*
			 * Generate a udev event so that appropriate
			 * symlinks can be created based on udev
			 * rules.
			 */
			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
		}
		break;
	}
}

static void control_work_handler(struct work_struct *work)
{
	struct ports_device *portdev;
	struct virtqueue *vq;
	struct port_buffer *buf;
	unsigned int len;

	portdev = container_of(work, struct ports_device, control_work);
	vq = portdev->c_ivq;

	spin_lock(&portdev->cvq_lock);
	while ((buf = virtqueue_get_buf(vq, &len))) {
		spin_unlock(&portdev->cvq_lock);

		buf->len = len;
		buf->offset = 0;

		handle_control_message(portdev, buf);

		spin_lock(&portdev->cvq_lock);
		if (add_inbuf(portdev->c_ivq, buf) < 0) {
			dev_warn(&portdev->vdev->dev,
				 "Error adding buffer to queue\n");
			free_buf(buf);
		}
	}
	spin_unlock(&portdev->cvq_lock);
}

static void in_intr(struct virtqueue *vq)
{
	struct port *port;
	unsigned long flags;

	port = find_port_by_vq(vq->vdev->priv, vq);
	if (!port)
		return;

	spin_lock_irqsave(&port->inbuf_lock, flags);
	if (!port->inbuf)
		port->inbuf = get_inbuf(port);

	/*
	 * Don't queue up data when port is closed.  This condition
	 * can be reached when a console port is not yet connected (no
	 * tty is spawned) and the host sends out data to console
	 * ports.  For generic serial ports, the host won't
	 * (shouldn't) send data till the guest is connected.
	 */
	if (!port->guest_connected)
		discard_port_data(port);

	spin_unlock_irqrestore(&port->inbuf_lock, flags);

	wake_up_interruptible(&port->waitqueue);

	/* Send a SIGIO indicating new data in case the process asked for it */
	send_sigio_to_port(port);

	if (is_console_port(port) && hvc_poll(port->cons.hvc))
		hvc_kick();
}

static void control_intr(struct virtqueue *vq)
{
	struct ports_device *portdev;

	portdev = vq->vdev->priv;
	schedule_work(&portdev->control_work);
}

static void config_intr(struct virtio_device *vdev)
{
	struct ports_device *portdev;

	portdev = vdev->priv;

	if (!use_multiport(portdev)) {
		struct port *port;
		u16 rows, cols;

		vdev->config->get(vdev,
				  offsetof(struct virtio_console_config, cols),
				  &cols, sizeof(u16));
		vdev->config->get(vdev,
				  offsetof(struct virtio_console_config, rows),
				  &rows, sizeof(u16));

		port = find_port_by_id(portdev, 0);
		set_console_size(port, rows, cols);

		/*
		 * We'll use this way of resizing only for legacy
		 * support.  For newer userspace
		 * (VIRTIO_CONSOLE_F_MULTIPORT+), use control messages
		 * to indicate console size changes so that it can be
		 * done per-port.
		 */
		resize_console(port);
	}
}

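/*
 * Virtqueue layout, as set up below: queues 0 and 1 are always port
 * 0's input and output queues.  With multiport, queues 2 and 3 carry
 * the control channel (host->guest and guest->host), and each further
 * port N gets its input/output pair at queues 2*N + 2 and 2*N + 3.
 * init_vqs() builds the callback and name arrays in exactly that
 * order before handing them to find_vqs().
 */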
static int init_vqs(struct ports_device *portdev)
{
	vq_callback_t **io_callbacks;
	char **io_names;
	struct virtqueue **vqs;
	u32 i, j, nr_ports, nr_queues;
	int err;

	nr_ports = portdev->config.max_nr_ports;
	nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;

	vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
	io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
	io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
	portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
				  GFP_KERNEL);
	portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
				   GFP_KERNEL);
	if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
	    !portdev->out_vqs) {
		err = -ENOMEM;
		goto free;
	}

	/*
	 * For backward compat (newer host but older guest), the host
	 * spawns a console port first and also inits the vqs for port
	 * 0 before others.
	 */
	j = 0;
	io_callbacks[j] = in_intr;
	io_callbacks[j + 1] = NULL;
	io_names[j] = "input";
	io_names[j + 1] = "output";
	j += 2;

	if (use_multiport(portdev)) {
		io_callbacks[j] = control_intr;
		io_callbacks[j + 1] = NULL;
		io_names[j] = "control-i";
		io_names[j + 1] = "control-o";

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			io_callbacks[j] = in_intr;
			io_callbacks[j + 1] = NULL;
			io_names[j] = "input";
			io_names[j + 1] = "output";
		}
	}
	/* Find the queues. */
	err = portdev->vdev->config->find_vqs(portdev->vdev, nr_queues, vqs,
					      io_callbacks,
					      (const char **)io_names);
	if (err)
		goto free;

	j = 0;
	portdev->in_vqs[0] = vqs[0];
	portdev->out_vqs[0] = vqs[1];
	j += 2;
	if (use_multiport(portdev)) {
		portdev->c_ivq = vqs[j];
		portdev->c_ovq = vqs[j + 1];

		for (i = 1; i < nr_ports; i++) {
			j += 2;
			portdev->in_vqs[i] = vqs[j];
			portdev->out_vqs[i] = vqs[j + 1];
		}
	}
	kfree(io_names);
	kfree(io_callbacks);
	kfree(vqs);

	return 0;

free:
	kfree(portdev->out_vqs);
	kfree(portdev->in_vqs);
	kfree(io_names);
	kfree(io_callbacks);
	kfree(vqs);

	return err;
}

static const struct file_operations portdev_fops = {
	.owner = THIS_MODULE,
};

/*
 * Once we're further in boot, we get probed like any other virtio
 * device.
 *
 * If the host also supports multiple console ports, we check the
 * config space to see how many ports the host has spawned.  We
 * initialize each port found.
 */
static int __devinit virtcons_probe(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	int err;
	bool multiport;

	portdev = kmalloc(sizeof(*portdev), GFP_KERNEL);
	if (!portdev) {
		err = -ENOMEM;
		goto fail;
	}

	/* Attach this portdev to this virtio_device, and vice-versa. */
	portdev->vdev = vdev;
	vdev->priv = portdev;

	spin_lock_irq(&pdrvdata_lock);
	portdev->drv_index = pdrvdata.index++;
	spin_unlock_irq(&pdrvdata_lock);

	portdev->chr_major = register_chrdev(0, "virtio-portsdev",
					     &portdev_fops);
	if (portdev->chr_major < 0) {
		dev_err(&vdev->dev,
			"Error %d registering chrdev for device %u\n",
			portdev->chr_major, portdev->drv_index);
		err = portdev->chr_major;
		goto free;
	}

	multiport = false;
	portdev->config.max_nr_ports = 1;
	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT)) {
		multiport = true;
		vdev->features[0] |= 1 << VIRTIO_CONSOLE_F_MULTIPORT;

		vdev->config->get(vdev, offsetof(struct virtio_console_config,
						 max_nr_ports),
				  &portdev->config.max_nr_ports,
				  sizeof(portdev->config.max_nr_ports));
	}

	/* Let the Host know we support multiple ports. */
	vdev->config->finalize_features(vdev);

	err = init_vqs(portdev);
	if (err < 0) {
		dev_err(&vdev->dev, "Error %d initializing vqs\n", err);
		goto free_chrdev;
	}

	spin_lock_init(&portdev->ports_lock);
	INIT_LIST_HEAD(&portdev->ports);

	if (multiport) {
		unsigned int nr_added_bufs;

		spin_lock_init(&portdev->cvq_lock);
		INIT_WORK(&portdev->control_work, &control_work_handler);

		nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
		if (!nr_added_bufs) {
			dev_err(&vdev->dev,
				"Error allocating buffers for control queue\n");
			err = -ENOMEM;
			goto free_vqs;
		}
	} else {
		/*
		 * For backward compatibility: Create a console port
		 * if we're running on an older host.
		 */
		add_port(portdev, 0);
	}

	spin_lock_irq(&pdrvdata_lock);
	list_add_tail(&portdev->list, &pdrvdata.portdevs);
	spin_unlock_irq(&pdrvdata_lock);

	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
			   VIRTIO_CONSOLE_DEVICE_READY, 1);
	return 0;

free_vqs:
	/* The host might want to notify mgmt sw about device add failure */
	__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
			   VIRTIO_CONSOLE_DEVICE_READY, 0);
	vdev->config->del_vqs(vdev);
	kfree(portdev->in_vqs);
	kfree(portdev->out_vqs);
free_chrdev:
	unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
	kfree(portdev);
fail:
	return err;
}

static void virtcons_remove(struct virtio_device *vdev)
{
	struct ports_device *portdev;
	struct port *port, *port2;

	portdev = vdev->priv;

	spin_lock_irq(&pdrvdata_lock);
	list_del(&portdev->list);
	spin_unlock_irq(&pdrvdata_lock);

	/* Disable interrupts for vqs */
	vdev->config->reset(vdev);
	/* Finish up work that's lined up */
	cancel_work_sync(&portdev->control_work);

	list_for_each_entry_safe(port, port2, &portdev->ports, list)
		unplug_port(port);

	unregister_chrdev(portdev->chr_major, "virtio-portsdev");

	/*
	 * When yanking out a device, we immediately lose the
	 * (device-side) queues.  So there's no point in keeping the
	 * guest side around till we drop our final reference.  This
	 * also means that any ports which are in an open state will
	 * have to just stop using the port, as the vqs are going
	 * away.
	 */
	if (use_multiport(portdev)) {
		struct port_buffer *buf;
		unsigned int len;

		while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
			free_buf(buf);

		while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
			free_buf(buf);
	}

	vdev->config->del_vqs(vdev);
	kfree(portdev->in_vqs);
	kfree(portdev->out_vqs);

	kfree(portdev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_CONSOLE_F_SIZE,
	VIRTIO_CONSOLE_F_MULTIPORT,
};

static struct virtio_driver virtio_console = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtcons_probe,
	.remove = virtcons_remove,
	.config_changed = config_intr,
};

static int __init init(void)
{
	int err;

	pdrvdata.class = class_create(THIS_MODULE, "virtio-ports");
	if (IS_ERR(pdrvdata.class)) {
		err = PTR_ERR(pdrvdata.class);
		pr_err("Error %d creating virtio-ports class\n", err);
		return err;
	}

	pdrvdata.debugfs_dir = debugfs_create_dir("virtio-ports", NULL);
	if (!pdrvdata.debugfs_dir) {
		pr_warning("Error %ld creating debugfs dir for virtio-ports\n",
			   PTR_ERR(pdrvdata.debugfs_dir));
	}
	INIT_LIST_HEAD(&pdrvdata.consoles);
	INIT_LIST_HEAD(&pdrvdata.portdevs);

	return register_virtio_driver(&virtio_console);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_console);

	class_destroy(pdrvdata.class);
	if (pdrvdata.debugfs_dir)
		debugfs_remove_recursive(pdrvdata.debugfs_dir);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");