1 /* 2 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 3 * Licensed under the GPL 4 */ 5 6 #include "linux/irqreturn.h" 7 #include "linux/kd.h" 8 #include "linux/sched.h" 9 #include "chan_kern.h" 10 #include "irq_kern.h" 11 #include "irq_user.h" 12 #include "kern_util.h" 13 #include "os.h" 14 15 #define LINE_BUFSIZE 4096 16 17 static irqreturn_t line_interrupt(int irq, void *data) 18 { 19 struct chan *chan = data; 20 struct line *line = chan->line; 21 struct tty_struct *tty = line->tty; 22 23 if (line) 24 chan_interrupt(&line->chan_list, &line->task, tty, irq); 25 return IRQ_HANDLED; 26 } 27 28 static void line_timer_cb(struct work_struct *work) 29 { 30 struct line *line = container_of(work, struct line, task.work); 31 32 if (!line->throttled) 33 chan_interrupt(&line->chan_list, &line->task, line->tty, 34 line->driver->read_irq); 35 } 36 37 /* 38 * Returns the free space inside the ring buffer of this line. 39 * 40 * Should be called while holding line->lock (this does not modify data). 41 */ 42 static int write_room(struct line *line) 43 { 44 int n; 45 46 if (line->buffer == NULL) 47 return LINE_BUFSIZE - 1; 48 49 /* This is for the case where the buffer is wrapped! */ 50 n = line->head - line->tail; 51 52 if (n <= 0) 53 n += LINE_BUFSIZE; /* The other case */ 54 return n - 1; 55 } 56 57 int line_write_room(struct tty_struct *tty) 58 { 59 struct line *line = tty->driver_data; 60 unsigned long flags; 61 int room; 62 63 spin_lock_irqsave(&line->lock, flags); 64 room = write_room(line); 65 spin_unlock_irqrestore(&line->lock, flags); 66 67 return room; 68 } 69 70 int line_chars_in_buffer(struct tty_struct *tty) 71 { 72 struct line *line = tty->driver_data; 73 unsigned long flags; 74 int ret; 75 76 spin_lock_irqsave(&line->lock, flags); 77 /* write_room subtracts 1 for the needed NULL, so we readd it.*/ 78 ret = LINE_BUFSIZE - (write_room(line) + 1); 79 spin_unlock_irqrestore(&line->lock, flags); 80 81 return ret; 82 } 83 84 /* 85 * This copies the content of buf into the circular buffer associated with 86 * this line. 87 * The return value is the number of characters actually copied, i.e. the ones 88 * for which there was space: this function is not supposed to ever flush out 89 * the circular buffer. 90 * 91 * Must be called while holding line->lock! 92 */ 93 static int buffer_data(struct line *line, const char *buf, int len) 94 { 95 int end, room; 96 97 if (line->buffer == NULL) { 98 line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC); 99 if (line->buffer == NULL) { 100 printk(KERN_ERR "buffer_data - atomic allocation " 101 "failed\n"); 102 return 0; 103 } 104 line->head = line->buffer; 105 line->tail = line->buffer; 106 } 107 108 room = write_room(line); 109 len = (len > room) ? room : len; 110 111 end = line->buffer + LINE_BUFSIZE - line->tail; 112 113 if (len < end) { 114 memcpy(line->tail, buf, len); 115 line->tail += len; 116 } 117 else { 118 /* The circular buffer is wrapping */ 119 memcpy(line->tail, buf, end); 120 buf += end; 121 memcpy(line->buffer, buf, len - end); 122 line->tail = line->buffer + len - end; 123 } 124 125 return len; 126 } 127 128 /* 129 * Flushes the ring buffer to the output channels. That is, write_chan is 130 * called, passing it line->head as buffer, and an appropriate count. 131 * 132 * On exit, returns 1 when the buffer is empty, 133 * 0 when the buffer is not empty on exit, 134 * and -errno when an error occurred. 135 * 136 * Must be called while holding line->lock!*/ 137 static int flush_buffer(struct line *line) 138 { 139 int n, count; 140 141 if ((line->buffer == NULL) || (line->head == line->tail)) 142 return 1; 143 144 if (line->tail < line->head) { 145 /* line->buffer + LINE_BUFSIZE is the end of the buffer! */ 146 count = line->buffer + LINE_BUFSIZE - line->head; 147 148 n = write_chan(&line->chan_list, line->head, count, 149 line->driver->write_irq); 150 if (n < 0) 151 return n; 152 if (n == count) { 153 /* 154 * We have flushed from ->head to buffer end, now we 155 * must flush only from the beginning to ->tail. 156 */ 157 line->head = line->buffer; 158 } else { 159 line->head += n; 160 return 0; 161 } 162 } 163 164 count = line->tail - line->head; 165 n = write_chan(&line->chan_list, line->head, count, 166 line->driver->write_irq); 167 168 if (n < 0) 169 return n; 170 171 line->head += n; 172 return line->head == line->tail; 173 } 174 175 void line_flush_buffer(struct tty_struct *tty) 176 { 177 struct line *line = tty->driver_data; 178 unsigned long flags; 179 int err; 180 181 spin_lock_irqsave(&line->lock, flags); 182 err = flush_buffer(line); 183 spin_unlock_irqrestore(&line->lock, flags); 184 } 185 186 /* 187 * We map both ->flush_chars and ->put_char (which go in pair) onto 188 * ->flush_buffer and ->write. Hope it's not that bad. 189 */ 190 void line_flush_chars(struct tty_struct *tty) 191 { 192 line_flush_buffer(tty); 193 } 194 195 int line_put_char(struct tty_struct *tty, unsigned char ch) 196 { 197 return line_write(tty, &ch, sizeof(ch)); 198 } 199 200 int line_write(struct tty_struct *tty, const unsigned char *buf, int len) 201 { 202 struct line *line = tty->driver_data; 203 unsigned long flags; 204 int n, ret = 0; 205 206 spin_lock_irqsave(&line->lock, flags); 207 if (line->head != line->tail) 208 ret = buffer_data(line, buf, len); 209 else { 210 n = write_chan(&line->chan_list, buf, len, 211 line->driver->write_irq); 212 if (n < 0) { 213 ret = n; 214 goto out_up; 215 } 216 217 len -= n; 218 ret += n; 219 if (len > 0) 220 ret += buffer_data(line, buf + n, len); 221 } 222 out_up: 223 spin_unlock_irqrestore(&line->lock, flags); 224 return ret; 225 } 226 227 void line_set_termios(struct tty_struct *tty, struct ktermios * old) 228 { 229 /* nothing */ 230 } 231 232 static const struct { 233 int cmd; 234 char *level; 235 char *name; 236 } tty_ioctls[] = { 237 /* don't print these, they flood the log ... */ 238 { TCGETS, NULL, "TCGETS" }, 239 { TCSETS, NULL, "TCSETS" }, 240 { TCSETSW, NULL, "TCSETSW" }, 241 { TCFLSH, NULL, "TCFLSH" }, 242 { TCSBRK, NULL, "TCSBRK" }, 243 244 /* general tty stuff */ 245 { TCSETSF, KERN_DEBUG, "TCSETSF" }, 246 { TCGETA, KERN_DEBUG, "TCGETA" }, 247 { TIOCMGET, KERN_DEBUG, "TIOCMGET" }, 248 { TCSBRKP, KERN_DEBUG, "TCSBRKP" }, 249 { TIOCMSET, KERN_DEBUG, "TIOCMSET" }, 250 251 /* linux-specific ones */ 252 { TIOCLINUX, KERN_INFO, "TIOCLINUX" }, 253 { KDGKBMODE, KERN_INFO, "KDGKBMODE" }, 254 { KDGKBTYPE, KERN_INFO, "KDGKBTYPE" }, 255 { KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" }, 256 }; 257 258 int line_ioctl(struct tty_struct *tty, struct file * file, 259 unsigned int cmd, unsigned long arg) 260 { 261 int ret; 262 int i; 263 264 ret = 0; 265 switch(cmd) { 266 #ifdef TIOCGETP 267 case TIOCGETP: 268 case TIOCSETP: 269 case TIOCSETN: 270 #endif 271 #ifdef TIOCGETC 272 case TIOCGETC: 273 case TIOCSETC: 274 #endif 275 #ifdef TIOCGLTC 276 case TIOCGLTC: 277 case TIOCSLTC: 278 #endif 279 /* Note: these are out of date as we now have TCGETS2 etc but this 280 whole lot should probably go away */ 281 case TCGETS: 282 case TCSETSF: 283 case TCSETSW: 284 case TCSETS: 285 case TCGETA: 286 case TCSETAF: 287 case TCSETAW: 288 case TCSETA: 289 case TCXONC: 290 case TCFLSH: 291 case TIOCOUTQ: 292 case TIOCINQ: 293 case TIOCGLCKTRMIOS: 294 case TIOCSLCKTRMIOS: 295 case TIOCPKT: 296 case TIOCGSOFTCAR: 297 case TIOCSSOFTCAR: 298 return -ENOIOCTLCMD; 299 #if 0 300 case TCwhatever: 301 /* do something */ 302 break; 303 #endif 304 default: 305 for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++) 306 if (cmd == tty_ioctls[i].cmd) 307 break; 308 if (i == ARRAY_SIZE(tty_ioctls)) { 309 printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n", 310 __func__, tty->name, cmd); 311 } 312 ret = -ENOIOCTLCMD; 313 break; 314 } 315 return ret; 316 } 317 318 void line_throttle(struct tty_struct *tty) 319 { 320 struct line *line = tty->driver_data; 321 322 deactivate_chan(&line->chan_list, line->driver->read_irq); 323 line->throttled = 1; 324 } 325 326 void line_unthrottle(struct tty_struct *tty) 327 { 328 struct line *line = tty->driver_data; 329 330 line->throttled = 0; 331 chan_interrupt(&line->chan_list, &line->task, tty, 332 line->driver->read_irq); 333 334 /* 335 * Maybe there is enough stuff pending that calling the interrupt 336 * throttles us again. In this case, line->throttled will be 1 337 * again and we shouldn't turn the interrupt back on. 338 */ 339 if (!line->throttled) 340 reactivate_chan(&line->chan_list, line->driver->read_irq); 341 } 342 343 static irqreturn_t line_write_interrupt(int irq, void *data) 344 { 345 struct chan *chan = data; 346 struct line *line = chan->line; 347 struct tty_struct *tty = line->tty; 348 int err; 349 350 /* 351 * Interrupts are disabled here because we registered the interrupt with 352 * IRQF_DISABLED (see line_setup_irq). 353 */ 354 355 spin_lock(&line->lock); 356 err = flush_buffer(line); 357 if (err == 0) { 358 return IRQ_NONE; 359 } else if (err < 0) { 360 line->head = line->buffer; 361 line->tail = line->buffer; 362 } 363 spin_unlock(&line->lock); 364 365 if (tty == NULL) 366 return IRQ_NONE; 367 368 tty_wakeup(tty); 369 return IRQ_HANDLED; 370 } 371 372 int line_setup_irq(int fd, int input, int output, struct line *line, void *data) 373 { 374 const struct line_driver *driver = line->driver; 375 int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM; 376 377 if (input) 378 err = um_request_irq(driver->read_irq, fd, IRQ_READ, 379 line_interrupt, flags, 380 driver->read_irq_name, data); 381 if (err) 382 return err; 383 if (output) 384 err = um_request_irq(driver->write_irq, fd, IRQ_WRITE, 385 line_write_interrupt, flags, 386 driver->write_irq_name, data); 387 line->have_irq = 1; 388 return err; 389 } 390 391 /* 392 * Normally, a driver like this can rely mostly on the tty layer 393 * locking, particularly when it comes to the driver structure. 394 * However, in this case, mconsole requests can come in "from the 395 * side", and race with opens and closes. 396 * 397 * mconsole config requests will want to be sure the device isn't in 398 * use, and get_config, open, and close will want a stable 399 * configuration. The checking and modification of the configuration 400 * is done under a spinlock. Checking whether the device is in use is 401 * line->tty->count > 1, also under the spinlock. 402 * 403 * tty->count serves to decide whether the device should be enabled or 404 * disabled on the host. If it's equal to 1, then we are doing the 405 * first open or last close. Otherwise, open and close just return. 406 */ 407 408 int line_open(struct line *lines, struct tty_struct *tty) 409 { 410 struct line *line = &lines[tty->index]; 411 int err = -ENODEV; 412 413 spin_lock(&line->count_lock); 414 if (!line->valid) 415 goto out_unlock; 416 417 err = 0; 418 if (tty->count > 1) 419 goto out_unlock; 420 421 spin_unlock(&line->count_lock); 422 423 tty->driver_data = line; 424 line->tty = tty; 425 426 err = enable_chan(line); 427 if (err) 428 return err; 429 430 INIT_DELAYED_WORK(&line->task, line_timer_cb); 431 432 if (!line->sigio) { 433 chan_enable_winch(&line->chan_list, tty); 434 line->sigio = 1; 435 } 436 437 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 438 &tty->winsize.ws_col); 439 440 return err; 441 442 out_unlock: 443 spin_unlock(&line->count_lock); 444 return err; 445 } 446 447 static void unregister_winch(struct tty_struct *tty); 448 449 void line_close(struct tty_struct *tty, struct file * filp) 450 { 451 struct line *line = tty->driver_data; 452 453 /* 454 * If line_open fails (and tty->driver_data is never set), 455 * tty_open will call line_close. So just return in this case. 456 */ 457 if (line == NULL) 458 return; 459 460 /* We ignore the error anyway! */ 461 flush_buffer(line); 462 463 spin_lock(&line->count_lock); 464 if (!line->valid) 465 goto out_unlock; 466 467 if (tty->count > 1) 468 goto out_unlock; 469 470 spin_unlock(&line->count_lock); 471 472 line->tty = NULL; 473 tty->driver_data = NULL; 474 475 if (line->sigio) { 476 unregister_winch(tty); 477 line->sigio = 0; 478 } 479 480 return; 481 482 out_unlock: 483 spin_unlock(&line->count_lock); 484 } 485 486 void close_lines(struct line *lines, int nlines) 487 { 488 int i; 489 490 for(i = 0; i < nlines; i++) 491 close_chan(&lines[i].chan_list, 0); 492 } 493 494 static int setup_one_line(struct line *lines, int n, char *init, int init_prio, 495 char **error_out) 496 { 497 struct line *line = &lines[n]; 498 int err = -EINVAL; 499 500 spin_lock(&line->count_lock); 501 502 if (line->tty != NULL) { 503 *error_out = "Device is already open"; 504 goto out; 505 } 506 507 if (line->init_pri <= init_prio) { 508 line->init_pri = init_prio; 509 if (!strcmp(init, "none")) 510 line->valid = 0; 511 else { 512 line->init_str = init; 513 line->valid = 1; 514 } 515 } 516 err = 0; 517 out: 518 spin_unlock(&line->count_lock); 519 return err; 520 } 521 522 /* 523 * Common setup code for both startup command line and mconsole initialization. 524 * @lines contains the array (of size @num) to modify; 525 * @init is the setup string; 526 * @error_out is an error string in the case of failure; 527 */ 528 529 int line_setup(struct line *lines, unsigned int num, char *init, 530 char **error_out) 531 { 532 int i, n, err; 533 char *end; 534 535 if (*init == '=') { 536 /* 537 * We said con=/ssl= instead of con#=, so we are configuring all 538 * consoles at once. 539 */ 540 n = -1; 541 } 542 else { 543 n = simple_strtoul(init, &end, 0); 544 if (*end != '=') { 545 *error_out = "Couldn't parse device number"; 546 return -EINVAL; 547 } 548 init = end; 549 } 550 init++; 551 552 if (n >= (signed int) num) { 553 *error_out = "Device number out of range"; 554 return -EINVAL; 555 } 556 else if (n >= 0) { 557 err = setup_one_line(lines, n, init, INIT_ONE, error_out); 558 if (err) 559 return err; 560 } 561 else { 562 for(i = 0; i < num; i++) { 563 err = setup_one_line(lines, i, init, INIT_ALL, 564 error_out); 565 if (err) 566 return err; 567 } 568 } 569 return n == -1 ? num : n; 570 } 571 572 int line_config(struct line *lines, unsigned int num, char *str, 573 const struct chan_opts *opts, char **error_out) 574 { 575 struct line *line; 576 char *new; 577 int n; 578 579 if (*str == '=') { 580 *error_out = "Can't configure all devices from mconsole"; 581 return -EINVAL; 582 } 583 584 new = kstrdup(str, GFP_KERNEL); 585 if (new == NULL) { 586 *error_out = "Failed to allocate memory"; 587 return -ENOMEM; 588 } 589 n = line_setup(lines, num, new, error_out); 590 if (n < 0) 591 return n; 592 593 line = &lines[n]; 594 return parse_chan_pair(line->init_str, line, n, opts, error_out); 595 } 596 597 int line_get_config(char *name, struct line *lines, unsigned int num, char *str, 598 int size, char **error_out) 599 { 600 struct line *line; 601 char *end; 602 int dev, n = 0; 603 604 dev = simple_strtoul(name, &end, 0); 605 if ((*end != '\0') || (end == name)) { 606 *error_out = "line_get_config failed to parse device number"; 607 return 0; 608 } 609 610 if ((dev < 0) || (dev >= num)) { 611 *error_out = "device number out of range"; 612 return 0; 613 } 614 615 line = &lines[dev]; 616 617 spin_lock(&line->count_lock); 618 if (!line->valid) 619 CONFIG_CHUNK(str, size, n, "none", 1); 620 else if (line->tty == NULL) 621 CONFIG_CHUNK(str, size, n, line->init_str, 1); 622 else n = chan_config_string(&line->chan_list, str, size, error_out); 623 spin_unlock(&line->count_lock); 624 625 return n; 626 } 627 628 int line_id(char **str, int *start_out, int *end_out) 629 { 630 char *end; 631 int n; 632 633 n = simple_strtoul(*str, &end, 0); 634 if ((*end != '\0') || (end == *str)) 635 return -1; 636 637 *str = end; 638 *start_out = n; 639 *end_out = n; 640 return n; 641 } 642 643 int line_remove(struct line *lines, unsigned int num, int n, char **error_out) 644 { 645 int err; 646 char config[sizeof("conxxxx=none\0")]; 647 648 sprintf(config, "%d=none", n); 649 err = line_setup(lines, num, config, error_out); 650 if (err >= 0) 651 err = 0; 652 return err; 653 } 654 655 struct tty_driver *register_lines(struct line_driver *line_driver, 656 const struct tty_operations *ops, 657 struct line *lines, int nlines) 658 { 659 int i; 660 struct tty_driver *driver = alloc_tty_driver(nlines); 661 662 if (!driver) 663 return NULL; 664 665 driver->driver_name = line_driver->name; 666 driver->name = line_driver->device_name; 667 driver->major = line_driver->major; 668 driver->minor_start = line_driver->minor_start; 669 driver->type = line_driver->type; 670 driver->subtype = line_driver->subtype; 671 driver->flags = TTY_DRIVER_REAL_RAW; 672 driver->init_termios = tty_std_termios; 673 tty_set_operations(driver, ops); 674 675 if (tty_register_driver(driver)) { 676 printk(KERN_ERR "register_lines : can't register %s driver\n", 677 line_driver->name); 678 put_tty_driver(driver); 679 return NULL; 680 } 681 682 for(i = 0; i < nlines; i++) { 683 if (!lines[i].valid) 684 tty_unregister_device(driver, i); 685 } 686 687 mconsole_register_dev(&line_driver->mc); 688 return driver; 689 } 690 691 static DEFINE_SPINLOCK(winch_handler_lock); 692 static LIST_HEAD(winch_handlers); 693 694 void lines_init(struct line *lines, int nlines, struct chan_opts *opts) 695 { 696 struct line *line; 697 char *error; 698 int i; 699 700 for(i = 0; i < nlines; i++) { 701 line = &lines[i]; 702 INIT_LIST_HEAD(&line->chan_list); 703 704 if (line->init_str == NULL) 705 continue; 706 707 line->init_str = kstrdup(line->init_str, GFP_KERNEL); 708 if (line->init_str == NULL) 709 printk(KERN_ERR "lines_init - kstrdup returned NULL\n"); 710 711 if (parse_chan_pair(line->init_str, line, i, opts, &error)) { 712 printk(KERN_ERR "parse_chan_pair failed for " 713 "device %d : %s\n", i, error); 714 line->valid = 0; 715 } 716 } 717 } 718 719 struct winch { 720 struct list_head list; 721 int fd; 722 int tty_fd; 723 int pid; 724 struct tty_struct *tty; 725 unsigned long stack; 726 }; 727 728 static void free_winch(struct winch *winch, int free_irq_ok) 729 { 730 list_del(&winch->list); 731 732 if (winch->pid != -1) 733 os_kill_process(winch->pid, 1); 734 if (winch->fd != -1) 735 os_close_file(winch->fd); 736 if (winch->stack != 0) 737 free_stack(winch->stack, 0); 738 if (free_irq_ok) 739 free_irq(WINCH_IRQ, winch); 740 kfree(winch); 741 } 742 743 static irqreturn_t winch_interrupt(int irq, void *data) 744 { 745 struct winch *winch = data; 746 struct tty_struct *tty; 747 struct line *line; 748 int err; 749 char c; 750 751 if (winch->fd != -1) { 752 err = generic_read(winch->fd, &c, NULL); 753 if (err < 0) { 754 if (err != -EAGAIN) { 755 printk(KERN_ERR "winch_interrupt : " 756 "read failed, errno = %d\n", -err); 757 printk(KERN_ERR "fd %d is losing SIGWINCH " 758 "support\n", winch->tty_fd); 759 free_winch(winch, 0); 760 return IRQ_HANDLED; 761 } 762 goto out; 763 } 764 } 765 tty = winch->tty; 766 if (tty != NULL) { 767 line = tty->driver_data; 768 if (line != NULL) { 769 chan_window_size(&line->chan_list, &tty->winsize.ws_row, 770 &tty->winsize.ws_col); 771 kill_pgrp(tty->pgrp, SIGWINCH, 1); 772 } 773 } 774 out: 775 if (winch->fd != -1) 776 reactivate_fd(winch->fd, WINCH_IRQ); 777 return IRQ_HANDLED; 778 } 779 780 void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty, 781 unsigned long stack) 782 { 783 struct winch *winch; 784 785 winch = kmalloc(sizeof(*winch), GFP_KERNEL); 786 if (winch == NULL) { 787 printk(KERN_ERR "register_winch_irq - kmalloc failed\n"); 788 goto cleanup; 789 } 790 791 *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list), 792 .fd = fd, 793 .tty_fd = tty_fd, 794 .pid = pid, 795 .tty = tty, 796 .stack = stack }); 797 798 if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt, 799 IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM, 800 "winch", winch) < 0) { 801 printk(KERN_ERR "register_winch_irq - failed to register " 802 "IRQ\n"); 803 goto out_free; 804 } 805 806 spin_lock(&winch_handler_lock); 807 list_add(&winch->list, &winch_handlers); 808 spin_unlock(&winch_handler_lock); 809 810 return; 811 812 out_free: 813 kfree(winch); 814 cleanup: 815 os_kill_process(pid, 1); 816 os_close_file(fd); 817 if (stack != 0) 818 free_stack(stack, 0); 819 } 820 821 static void unregister_winch(struct tty_struct *tty) 822 { 823 struct list_head *ele; 824 struct winch *winch; 825 826 spin_lock(&winch_handler_lock); 827 828 list_for_each(ele, &winch_handlers) { 829 winch = list_entry(ele, struct winch, list); 830 if (winch->tty == tty) { 831 free_winch(winch, 1); 832 break; 833 } 834 } 835 spin_unlock(&winch_handler_lock); 836 } 837 838 static void winch_cleanup(void) 839 { 840 struct list_head *ele, *next; 841 struct winch *winch; 842 843 spin_lock(&winch_handler_lock); 844 845 list_for_each_safe(ele, next, &winch_handlers) { 846 winch = list_entry(ele, struct winch, list); 847 free_winch(winch, 1); 848 } 849 850 spin_unlock(&winch_handler_lock); 851 } 852 __uml_exitcall(winch_cleanup); 853 854 char *add_xterm_umid(char *base) 855 { 856 char *umid, *title; 857 int len; 858 859 umid = get_umid(); 860 if (*umid == '\0') 861 return base; 862 863 len = strlen(base) + strlen(" ()") + strlen(umid) + 1; 864 title = kmalloc(len, GFP_KERNEL); 865 if (title == NULL) { 866 printk(KERN_ERR "Failed to allocate buffer for xterm title\n"); 867 return base; 868 } 869 870 snprintf(title, len, "%s (%s)", base, umid); 871 return title; 872 } 873