1 /* 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 3 * Licensed under the GPL 4 */ 5 6 #include "linux/irqreturn.h" 7 #include "linux/kd.h" 8 #include "chan_kern.h" 9 #include "irq_kern.h" 10 #include "irq_user.h" 11 #include "kern_util.h" 12 #include "os.h" 13 14 #define LINE_BUFSIZE 4096 15 16 static irqreturn_t line_interrupt(int irq, void *data) 17 { 18 struct chan *chan = data; 19 struct line *line = chan->line; 20 struct tty_struct *tty = line->tty; 21 22 if (line) 23 chan_interrupt(&line->chan_list, &line->task, tty, irq); 24 return IRQ_HANDLED; 25 } 26 27 static void line_timer_cb(struct work_struct *work) 28 { 29 struct line *line = container_of(work, struct line, task.work); 30 31 if (!line->throttled) 32 chan_interrupt(&line->chan_list, &line->task, line->tty, 33 line->driver->read_irq); 34 } 35 36 /* 37 * Returns the free space inside the ring buffer of this line. 38 * 39 * Should be called while holding line->lock (this does not modify data). 40 */ 41 static int write_room(struct line *line) 42 { 43 int n; 44 45 if (line->buffer == NULL) 46 return LINE_BUFSIZE - 1; 47 48 /* This is for the case where the buffer is wrapped! */ 49 n = line->head - line->tail; 50 51 if (n <= 0) 52 n += LINE_BUFSIZE; /* The other case */ 53 return n - 1; 54 } 55 56 int line_write_room(struct tty_struct *tty) 57 { 58 struct line *line = tty->driver_data; 59 unsigned long flags; 60 int room; 61 62 spin_lock_irqsave(&line->lock, flags); 63 room = write_room(line); 64 spin_unlock_irqrestore(&line->lock, flags); 65 66 return room; 67 } 68 69 int line_chars_in_buffer(struct tty_struct *tty) 70 { 71 struct line *line = tty->driver_data; 72 unsigned long flags; 73 int ret; 74 75 spin_lock_irqsave(&line->lock, flags); 76 /* write_room subtracts 1 for the needed NULL, so we readd it.*/ 77 ret = LINE_BUFSIZE - (write_room(line) + 1); 78 spin_unlock_irqrestore(&line->lock, flags); 79 80 return ret; 81 } 82 83 /* 84 * This copies the content of buf into the circular buffer associated with 85 * this line. 86 * The return value is the number of characters actually copied, i.e. the ones 87 * for which there was space: this function is not supposed to ever flush out 88 * the circular buffer. 89 * 90 * Must be called while holding line->lock! 91 */ 92 static int buffer_data(struct line *line, const char *buf, int len) 93 { 94 int end, room; 95 96 if (line->buffer == NULL) { 97 line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC); 98 if (line->buffer == NULL) { 99 printk(KERN_ERR "buffer_data - atomic allocation " 100 "failed\n"); 101 return 0; 102 } 103 line->head = line->buffer; 104 line->tail = line->buffer; 105 } 106 107 room = write_room(line); 108 len = (len > room) ? room : len; 109 110 end = line->buffer + LINE_BUFSIZE - line->tail; 111 112 if (len < end) { 113 memcpy(line->tail, buf, len); 114 line->tail += len; 115 } 116 else { 117 /* The circular buffer is wrapping */ 118 memcpy(line->tail, buf, end); 119 buf += end; 120 memcpy(line->buffer, buf, len - end); 121 line->tail = line->buffer + len - end; 122 } 123 124 return len; 125 } 126 127 /* 128 * Flushes the ring buffer to the output channels. That is, write_chan is 129 * called, passing it line->head as buffer, and an appropriate count. 130 * 131 * On exit, returns 1 when the buffer is empty, 132 * 0 when the buffer is not empty on exit, 133 * and -errno when an error occurred. 134 * 135 * Must be called while holding line->lock!*/ 136 static int flush_buffer(struct line *line) 137 { 138 int n, count; 139 140 if ((line->buffer == NULL) || (line->head == line->tail)) 141 return 1; 142 143 if (line->tail < line->head) { 144 /* line->buffer + LINE_BUFSIZE is the end of the buffer! */ 145 count = line->buffer + LINE_BUFSIZE - line->head; 146 147 n = write_chan(&line->chan_list, line->head, count, 148 line->driver->write_irq); 149 if (n < 0) 150 return n; 151 if (n == count) { 152 /* 153 * We have flushed from ->head to buffer end, now we 154 * must flush only from the beginning to ->tail. 155 */ 156 line->head = line->buffer; 157 } else { 158 line->head += n; 159 return 0; 160 } 161 } 162 163 count = line->tail - line->head; 164 n = write_chan(&line->chan_list, line->head, count, 165 line->driver->write_irq); 166 167 if (n < 0) 168 return n; 169 170 line->head += n; 171 return line->head == line->tail; 172 } 173 174 void line_flush_buffer(struct tty_struct *tty) 175 { 176 struct line *line = tty->driver_data; 177 unsigned long flags; 178 int err; 179 180 spin_lock_irqsave(&line->lock, flags); 181 err = flush_buffer(line); 182 spin_unlock_irqrestore(&line->lock, flags); 183 } 184 185 /* 186 * We map both ->flush_chars and ->put_char (which go in pair) onto 187 * ->flush_buffer and ->write. Hope it's not that bad. 188 */ 189 void line_flush_chars(struct tty_struct *tty) 190 { 191 line_flush_buffer(tty); 192 } 193 194 int line_put_char(struct tty_struct *tty, unsigned char ch) 195 { 196 return line_write(tty, &ch, sizeof(ch)); 197 } 198 199 int line_write(struct tty_struct *tty, const unsigned char *buf, int len) 200 { 201 struct line *line = tty->driver_data; 202 unsigned long flags; 203 int n, ret = 0; 204 205 spin_lock_irqsave(&line->lock, flags); 206 if (line->head != line->tail) 207 ret = buffer_data(line, buf, len); 208 else { 209 n = write_chan(&line->chan_list, buf, len, 210 line->driver->write_irq); 211 if (n < 0) { 212 ret = n; 213 goto out_up; 214 } 215 216 len -= n; 217 ret += n; 218 if (len > 0) 219 ret += buffer_data(line, buf + n, len); 220 } 221 out_up: 222 spin_unlock_irqrestore(&line->lock, flags); 223 return ret; 224 } 225 226 void line_set_termios(struct tty_struct *tty, struct ktermios * old) 227 { 228 /* nothing */ 229 } 230 231 static const struct { 232 int cmd; 233 char *level; 234 char *name; 235 } tty_ioctls[] = { 236 /* don't print these, they flood the log ... */ 237 { TCGETS, NULL, "TCGETS" }, 238 { TCSETS, NULL, "TCSETS" }, 239 { TCSETSW, NULL, "TCSETSW" }, 240 { TCFLSH, NULL, "TCFLSH" }, 241 { TCSBRK, NULL, "TCSBRK" }, 242 243 /* general tty stuff */ 244 { TCSETSF, KERN_DEBUG, "TCSETSF" }, 245 { TCGETA, KERN_DEBUG, "TCGETA" }, 246 { TIOCMGET, KERN_DEBUG, "TIOCMGET" }, 247 { TCSBRKP, KERN_DEBUG, "TCSBRKP" }, 248 { TIOCMSET, KERN_DEBUG, "TIOCMSET" }, 249 250 /* linux-specific ones */ 251 { TIOCLINUX, KERN_INFO, "TIOCLINUX" }, 252 { KDGKBMODE, KERN_INFO, "KDGKBMODE" }, 253 { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" }, 254 { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, 255 }; 256 257 int line_ioctl(struct tty_struct *tty, struct file * file, 258 unsigned int cmd, unsigned long arg) 259 { 260 int ret; 261 int i; 262 263 ret = 0; 264 switch(cmd) { 265 #ifdef TIOCGETP 266 case TIOCGETP: 267 case TIOCSETP: 268 case TIOCSETN: 269 #endif 270 #ifdef TIOCGETC 271 case TIOCGETC: 272 case TIOCSETC: 273 #endif 274 #ifdef TIOCGLTC 275 case TIOCGLTC: 276 case TIOCSLTC: 277 #endif 278 case TCGETS: 279 case TCSETSF: 280 case TCSETSW: 281 case TCSETS: 282 case TCGETA: 283 case TCSETAF: 284 case TCSETAW: 285 case TCSETA: 286 case TCXONC: 287 case TCFLSH: 288 case TIOCOUTQ: 289 case TIOCINQ: 290 case TIOCGLCKTRMIOS: 291 case TIOCSLCKTRMIOS: 292 case TIOCPKT: 293 case TIOCGSOFTCAR: 294 case TIOCSSOFTCAR: 295 return -ENOIOCTLCMD; 296 #if 0 297 case TCwhatever: 298 /* do something */ 299 break; 300 #endif 301 default: 302 for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++) 303 if (cmd == tty_ioctls[i].cmd) 304 break; 305 if (i == ARRAY_SIZE(tty_ioctls)) { 306 printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n", 307 __func__, tty->name, cmd); 308 } 309 ret = -ENOIOCTLCMD; 310 break; 311 } 312 return ret; 313 } 314 315 void line_throttle(struct tty_struct *tty) 316 { 317 struct line *line = tty->driver_data; 318 319 deactivate_chan(&line->chan_list, line->driver->read_irq); 320 line->throttled = 1; 321 } 322 323 void line_unthrottle(struct tty_struct *tty) 324 { 325 struct line *line = tty->driver_data; 326 327 line->throttled = 0; 328 chan_interrupt(&line->chan_list, &line->task, tty, 329 line->driver->read_irq); 330 331 /* 332 * Maybe there is enough stuff pending that calling the interrupt 333 * throttles us again. In this case, line->throttled will be 1 334 * again and we shouldn't turn the interrupt back on. 335 */ 336 if (!line->throttled) 337 reactivate_chan(&line->chan_list, line->driver->read_irq); 338 } 339 340 static irqreturn_t line_write_interrupt(int irq, void *data) 341 { 342 struct chan *chan = data; 343 struct line *line = chan->line; 344 struct tty_struct *tty = line->tty; 345 int err; 346 347 /* 348 * Interrupts are disabled here because we registered the interrupt with 349 * IRQF_DISABLED (see line_setup_irq). 350 */ 351 352 spin_lock(&line->lock); 353 err = flush_buffer(line); 354 if (err == 0) { 355 return IRQ_NONE; 356 } else if (err < 0) { 357 line->head = line->buffer; 358 line->tail = line->buffer; 359 } 360 spin_unlock(&line->lock); 361 362 if (tty == NULL) 363 return IRQ_NONE; 364 365 if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags) && 366 (tty->ldisc.write_wakeup != NULL)) 367 (tty->ldisc.write_wakeup)(tty); 368 369 /* 370 * BLOCKING mode 371 * In blocking mode, everything sleeps on tty->write_wait. 372 * Sleeping in the console driver would break non-blocking 373 * writes. 374 */ 375 376 if (waitqueue_active(&tty->write_wait)) 377 wake_up_interruptible(&tty->write_wait); 378 return IRQ_HANDLED; 379 } 380 381 int line_setup_irq(int fd, int input, int output, struct line *line, void *data) 382 { 383 const struct line_driver *driver = line->driver; 384 int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM; 385 386 if (input) 387 err = um_request_irq(driver->read_irq, fd, IRQ_READ, 388 line_interrupt, flags, 389 driver->read_irq_name, data); 390 if (err) 391 return err; 392 if (output) 393 err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, 394 line_write_interrupt, flags, 395 driver->write_irq_name, data); 396 line->have_irq = 1; 397 return err; 398 } 399 400 /* 401 * Normally, a driver like this can rely mostly on the tty layer 402 * locking, particularly when it comes to the driver structure. 403 * However, in this case, mconsole requests can come in "from the 404 * side", and race with opens and closes. 405 * 406 * mconsole config requests will want to be sure the device isn't in 407 * use, and get_config, open, and close will want a stable 408 * configuration. The checking and modification of the configuration 409 * is done under a spinlock. Checking whether the device is in use is 410 * line->tty->count > 1, also under the spinlock. 411 * 412 * tty->count serves to decide whether the device should be enabled or 413 * disabled on the host. If it's equal to 1, then we are doing the 414 * first open or last close. Otherwise, open and close just return. 415 */ 416 417 int line_open(struct line *lines, struct tty_struct *tty) 418 { 419 struct line *line = &lines[tty->index]; 420 int err = -ENODEV; 421 422 spin_lock(&line->count_lock); 423 if (!line->valid) 424 goto out_unlock; 425 426 err = 0; 427 if (tty->count > 1) 428 goto out_unlock; 429 430 spin_unlock(&line->count_lock); 431 432 tty->driver_data = line; 433 line->tty = tty; 434 435 err = enable_chan(line); 436 if (err) 437 return err; 438 439 INIT_DELAYED_WORK(&line->task, line_timer_cb); 440 441 if (!line->sigio) { 442 chan_enable_winch(&line->chan_list, tty); 443 line->sigio = 1; 444 } 445 446 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 447 &tty->winsize.ws_col); 448 449 return err; 450 451 out_unlock: 452 spin_unlock(&line->count_lock); 453 return err; 454 } 455 456 static void unregister_winch(struct tty_struct *tty); 457 458 void line_close(struct tty_struct *tty, struct file * filp) 459 { 460 struct line *line = tty->driver_data; 461 462 /* 463 * If line_open fails (and tty->driver_data is never set), 464 * tty_open will call line_close. So just return in this case. 465 */ 466 if (line == NULL) 467 return; 468 469 /* We ignore the error anyway! */ 470 flush_buffer(line); 471 472 spin_lock(&line->count_lock); 473 if (!line->valid) 474 goto out_unlock; 475 476 if (tty->count > 1) 477 goto out_unlock; 478 479 spin_unlock(&line->count_lock); 480 481 line->tty = NULL; 482 tty->driver_data = NULL; 483 484 if (line->sigio) { 485 unregister_winch(tty); 486 line->sigio = 0; 487 } 488 489 return; 490 491 out_unlock: 492 spin_unlock(&line->count_lock); 493 } 494 495 void close_lines(struct line *lines, int nlines) 496 { 497 int i; 498 499 for(i = 0; i < nlines; i++) 500 close_chan(&lines[i].chan_list, 0); 501 } 502 503 static int setup_one_line(struct line *lines, int n, char *init, int init_prio, 504 char **error_out) 505 { 506 struct line *line = &lines[n]; 507 int err = -EINVAL; 508 509 spin_lock(&line->count_lock); 510 511 if (line->tty != NULL) { 512 *error_out = "Device is already open"; 513 goto out; 514 } 515 516 if (line->init_pri <= init_prio) { 517 line->init_pri = init_prio; 518 if (!strcmp(init, "none")) 519 line->valid = 0; 520 else { 521 line->init_str = init; 522 line->valid = 1; 523 } 524 } 525 err = 0; 526 out: 527 spin_unlock(&line->count_lock); 528 return err; 529 } 530 531 /* 532 * Common setup code for both startup command line and mconsole initialization. 533 * @lines contains the array (of size @num) to modify; 534 * @init is the setup string; 535 * @error_out is an error string in the case of failure; 536 */ 537 538 int line_setup(struct line *lines, unsigned int num, char *init, 539 char **error_out) 540 { 541 int i, n, err; 542 char *end; 543 544 if (*init == '=') { 545 /* 546 * We said con=/ssl= instead of con#=, so we are configuring all 547 * consoles at once. 548 */ 549 n = -1; 550 } 551 else { 552 n = simple_strtoul(init, &end, 0); 553 if (*end != '=') { 554 *error_out = "Couldn't parse device number"; 555 return -EINVAL; 556 } 557 init = end; 558 } 559 init++; 560 561 if (n >= (signed int) num) { 562 *error_out = "Device number out of range"; 563 return -EINVAL; 564 } 565 else if (n >= 0) { 566 err = setup_one_line(lines, n, init, INIT_ONE, error_out); 567 if (err) 568 return err; 569 } 570 else { 571 for(i = 0; i < num; i++) { 572 err = setup_one_line(lines, i, init, INIT_ALL, 573 error_out); 574 if (err) 575 return err; 576 } 577 } 578 return n == -1 ? num : n; 579 } 580 581 int line_config(struct line *lines, unsigned int num, char *str, 582 const struct chan_opts *opts, char **error_out) 583 { 584 struct line *line; 585 char *new; 586 int n; 587 588 if (*str == '=') { 589 *error_out = "Can't configure all devices from mconsole"; 590 return -EINVAL; 591 } 592 593 new = kstrdup(str, GFP_KERNEL); 594 if (new == NULL) { 595 *error_out = "Failed to allocate memory"; 596 return -ENOMEM; 597 } 598 n = line_setup(lines, num, new, error_out); 599 if (n < 0) 600 return n; 601 602 line = &lines[n]; 603 return parse_chan_pair(line->init_str, line, n, opts, error_out); 604 } 605 606 int line_get_config(char *name, struct line *lines, unsigned int num, char *str, 607 int size, char **error_out) 608 { 609 struct line *line; 610 char *end; 611 int dev, n = 0; 612 613 dev = simple_strtoul(name, &end, 0); 614 if ((*end != '\0') || (end == name)) { 615 *error_out = "line_get_config failed to parse device number"; 616 return 0; 617 } 618 619 if ((dev < 0) || (dev >= num)) { 620 *error_out = "device number out of range"; 621 return 0; 622 } 623 624 line = &lines[dev]; 625 626 spin_lock(&line->count_lock); 627 if (!line->valid) 628 CONFIG_CHUNK(str, size, n, "none", 1); 629 else if (line->tty == NULL) 630 CONFIG_CHUNK(str, size, n, line->init_str, 1); 631 else n = chan_config_string(&line->chan_list, str, size, error_out); 632 spin_unlock(&line->count_lock); 633 634 return n; 635 } 636 637 int line_id(char **str, int *start_out, int *end_out) 638 { 639 char *end; 640 int n; 641 642 n = simple_strtoul(*str, &end, 0); 643 if ((*end != '\0') || (end == *str)) 644 return -1; 645 646 *str = end; 647 *start_out = n; 648 *end_out = n; 649 return n; 650 } 651 652 int line_remove(struct line *lines, unsigned int num, int n, char **error_out) 653 { 654 int err; 655 char config[sizeof("conxxxx=none\0")]; 656 657 sprintf(config, "%d=none", n); 658 err = line_setup(lines, num, config, error_out); 659 if (err >= 0) 660 err = 0; 661 return err; 662 } 663 664 struct tty_driver *register_lines(struct line_driver *line_driver, 665 const struct tty_operations *ops, 666 struct line *lines, int nlines) 667 { 668 int i; 669 struct tty_driver *driver = alloc_tty_driver(nlines); 670 671 if (!driver) 672 return NULL; 673 674 driver->driver_name = line_driver->name; 675 driver->name = line_driver->device_name; 676 driver->major = line_driver->major; 677 driver->minor_start = line_driver->minor_start; 678 driver->type = line_driver->type; 679 driver->subtype = line_driver->subtype; 680 driver->flags = TTY_DRIVER_REAL_RAW; 681 driver->init_termios = tty_std_termios; 682 tty_set_operations(driver, ops); 683 684 if (tty_register_driver(driver)) { 685 printk(KERN_ERR "register_lines : can't register %s driver\n", 686 line_driver->name); 687 put_tty_driver(driver); 688 return NULL; 689 } 690 691 for(i = 0; i < nlines; i++) { 692 if (!lines[i].valid) 693 tty_unregister_device(driver, i); 694 } 695 696 mconsole_register_dev(&line_driver->mc); 697 return driver; 698 } 699 700 static DEFINE_SPINLOCK(winch_handler_lock); 701 static LIST_HEAD(winch_handlers); 702 703 void lines_init(struct line *lines, int nlines, struct chan_opts *opts) 704 { 705 struct line *line; 706 char *error; 707 int i; 708 709 for(i = 0; i < nlines; i++) { 710 line = &lines[i]; 711 INIT_LIST_HEAD(&line->chan_list); 712 713 if (line->init_str == NULL) 714 continue; 715 716 line->init_str = kstrdup(line->init_str, GFP_KERNEL); 717 if (line->init_str == NULL) 718 printk(KERN_ERR "lines_init - kstrdup returned NULL\n"); 719 720 if (parse_chan_pair(line->init_str, line, i, opts, &error)) { 721 printk(KERN_ERR "parse_chan_pair failed for " 722 "device %d : %s\n", i, error); 723 line->valid = 0; 724 } 725 } 726 } 727 728 struct winch { 729 struct list_head list; 730 int fd; 731 int tty_fd; 732 int pid; 733 struct tty_struct *tty; 734 unsigned long stack; 735 }; 736 737 static void free_winch(struct winch *winch, int free_irq_ok) 738 { 739 list_del(&winch->list); 740 741 if (winch->pid != -1) 742 os_kill_process(winch->pid, 1); 743 if (winch->fd != -1) 744 os_close_file(winch->fd); 745 if (winch->stack != 0) 746 free_stack(winch->stack, 0); 747 if (free_irq_ok) 748 free_irq(WINCH_IRQ, winch); 749 kfree(winch); 750 } 751 752 static irqreturn_t winch_interrupt(int irq, void *data) 753 { 754 struct winch *winch = data; 755 struct tty_struct *tty; 756 struct line *line; 757 int err; 758 char c; 759 760 if (winch->fd != -1) { 761 err = generic_read(winch->fd, &c, NULL); 762 if (err < 0) { 763 if (err != -EAGAIN) { 764 printk(KERN_ERR "winch_interrupt : " 765 "read failed, errno = %d\n", -err); 766 printk(KERN_ERR "fd %d is losing SIGWINCH " 767 "support\n", winch->tty_fd); 768 free_winch(winch, 0); 769 return IRQ_HANDLED; 770 } 771 goto out; 772 } 773 } 774 tty = winch->tty; 775 if (tty != NULL) { 776 line = tty->driver_data; 777 if (line != NULL) { 778 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 779 &tty->winsize.ws_col); 780 kill_pgrp(tty->pgrp, SIGWINCH, 1); 781 } 782 } 783 out: 784 if (winch->fd != -1) 785 reactivate_fd(winch->fd, WINCH_IRQ); 786 return IRQ_HANDLED; 787 } 788 789 void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, 790 unsigned long stack) 791 { 792 struct winch *winch; 793 794 winch = kmalloc(sizeof(*winch), GFP_KERNEL); 795 if (winch == NULL) { 796 printk(KERN_ERR "register_winch_irq - kmalloc failed\n"); 797 goto cleanup; 798 } 799 800 *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list), 801 .fd = fd, 802 .tty_fd = tty_fd, 803 .pid = pid, 804 .tty = tty, 805 .stack = stack }); 806 807 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, 808 IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM, 809 "winch", winch) < 0) { 810 printk(KERN_ERR "register_winch_irq - failed to register " 811 "IRQ\n"); 812 goto out_free; 813 } 814 815 spin_lock(&winch_handler_lock); 816 list_add(&winch->list, &winch_handlers); 817 spin_unlock(&winch_handler_lock); 818 819 return; 820 821 out_free: 822 kfree(winch); 823 cleanup: 824 os_kill_process(pid, 1); 825 os_close_file(fd); 826 if (stack != 0) 827 free_stack(stack, 0); 828 } 829 830 static void unregister_winch(struct tty_struct *tty) 831 { 832 struct list_head *ele; 833 struct winch *winch; 834 835 spin_lock(&winch_handler_lock); 836 837 list_for_each(ele, &winch_handlers) { 838 winch = list_entry(ele, struct winch, list); 839 if (winch->tty == tty) { 840 free_winch(winch, 1); 841 break; 842 } 843 } 844 spin_unlock(&winch_handler_lock); 845 } 846 847 static void winch_cleanup(void) 848 { 849 struct list_head *ele, *next; 850 struct winch *winch; 851 852 spin_lock(&winch_handler_lock); 853 854 list_for_each_safe(ele, next, &winch_handlers) { 855 winch = list_entry(ele, struct winch, list); 856 free_winch(winch, 1); 857 } 858 859 spin_unlock(&winch_handler_lock); 860 } 861 __uml_exitcall(winch_cleanup); 862 863 char *add_xterm_umid(char *base) 864 { 865 char *umid, *title; 866 int len; 867 868 umid = get_umid(); 869 if (*umid == '\0') 870 return base; 871 872 len = strlen(base) + strlen(" ()") + strlen(umid) + 1; 873 title = kmalloc(len, GFP_KERNEL); 874 if (title == NULL) { 875 printk(KERN_ERR "Failed to allocate buffer for xterm title\n"); 876 return base; 877 } 878 879 snprintf(title, len, "%s (%s)", base, umid); 880 return title; 881 } 882