/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096

static irqreturn_t line_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;

	if (line)
		chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
	return IRQ_HANDLED;
}

static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(&line->chan_list, &line->task, line->tty,
			       line->driver->read_irq);
}

/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
	int n;

	if (line->buffer == NULL)
		return LINE_BUFSIZE - 1;

	/* This is for the case where the buffer is wrapped! */
	n = line->head - line->tail;

	if (n <= 0)
		n += LINE_BUFSIZE; /* The other case */
	return n - 1;
}

int line_write_room(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int room;

	spin_lock_irqsave(&line->lock, flags);
	room = write_room(line);
	spin_unlock_irqrestore(&line->lock, flags);

	return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&line->lock, flags);
	/* write_room subtracts 1 for the needed NULL, so we re-add it. */
	ret = LINE_BUFSIZE - (write_room(line) + 1);
	spin_unlock_irqrestore(&line->lock, flags);

	return ret;
}

/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
	int end, room;

	if (line->buffer == NULL) {
		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
		if (line->buffer == NULL) {
			printk(KERN_ERR "buffer_data - atomic allocation "
			       "failed\n");
			return 0;
		}
		line->head = line->buffer;
		line->tail = line->buffer;
	}

	room = write_room(line);
	len = (len > room) ? room : len;

	end = line->buffer + LINE_BUFSIZE - line->tail;

	if (len < end) {
		memcpy(line->tail, buf, len);
		line->tail += len;
	} else {
		/* The circular buffer is wrapping */
		memcpy(line->tail, buf, end);
		buf += end;
		memcpy(line->buffer, buf, len - end);
		line->tail = line->buffer + len - end;
	}

	return len;
}

/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * On exit, returns 1 when the buffer is empty,
 * 0 when the buffer is not empty on exit,
 * and -errno when an error occurred.
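 *
 * On a partial write, line->head is advanced past the bytes that were
 * written, so the next flush resumes where this one stopped.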
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
	int n, count;

	if ((line->buffer == NULL) || (line->head == line->tail))
		return 1;

	if (line->tail < line->head) {
		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
		count = line->buffer + LINE_BUFSIZE - line->head;

		n = write_chan(&line->chan_list, line->head, count,
			       line->driver->write_irq);
		if (n < 0)
			return n;
		if (n == count) {
			/*
			 * We have flushed from ->head to buffer end, now we
			 * must flush only from the beginning to ->tail.
			 */
			line->head = line->buffer;
		} else {
			line->head += n;
			return 0;
		}
	}

	count = line->tail - line->head;
	n = write_chan(&line->chan_list, line->head, count,
		       line->driver->write_irq);

	if (n < 0)
		return n;

	line->head += n;
	return line->head == line->tail;
}

void line_flush_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&line->lock, flags);
	err = flush_buffer(line);
	spin_unlock_irqrestore(&line->lock, flags);
}

/*
 * We map both ->flush_chars and ->put_char (which come as a pair) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
	line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
	return line_write(tty, &ch, sizeof(ch));
}

int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int n, ret = 0;

	spin_lock_irqsave(&line->lock, flags);
	if (line->head != line->tail)
		ret = buffer_data(line, buf, len);
	else {
		n = write_chan(&line->chan_list, buf, len,
			       line->driver->write_irq);
		if (n < 0) {
			ret = n;
			goto out_up;
		}

		len -= n;
		ret += n;
		if (len > 0)
			ret += buffer_data(line, buf + n, len);
	}
out_up:
	spin_unlock_irqrestore(&line->lock, flags);
	return ret;
}

void line_set_termios(struct tty_struct *tty, struct ktermios *old)
{
	/* nothing */
}

static const struct {
	int cmd;
	char *level;
	char *name;
} tty_ioctls[] = {
	/* don't print these, they flood the log ... */
	{ TCGETS, NULL, "TCGETS" },
	{ TCSETS, NULL, "TCSETS" },
	{ TCSETSW, NULL, "TCSETSW" },
	{ TCFLSH, NULL, "TCFLSH" },
	{ TCSBRK, NULL, "TCSBRK" },

	/* general tty stuff */
	{ TCSETSF, KERN_DEBUG, "TCSETSF" },
	{ TCGETA, KERN_DEBUG, "TCGETA" },
	{ TIOCMGET, KERN_DEBUG, "TIOCMGET" },
	{ TCSBRKP, KERN_DEBUG, "TCSBRKP" },
	{ TIOCMSET, KERN_DEBUG, "TIOCMSET" },

	/* linux-specific ones */
	{ TIOCLINUX, KERN_INFO, "TIOCLINUX" },
	{ KDGKBMODE, KERN_INFO, "KDGKBMODE" },
	{ KDGKBTYPE, KERN_INFO, "KDGKBTYPE" },
	{ KDSIGACCEPT, KERN_INFO, "KDSIGACCEPT" },
};

int line_ioctl(struct tty_struct *tty, struct file *file,
	       unsigned int cmd, unsigned long arg)
{
	int ret;
	int i;

	ret = 0;
	switch (cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
	case TIOCSETP:
	case TIOCSETN:
#endif
#ifdef TIOCGETC
	case TIOCGETC:
	case TIOCSETC:
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
	case TIOCSLTC:
#endif
	/*
	 * Note: these are out of date as we now have TCGETS2 etc but this
	 * whole lot should probably go away
	 */
	case TCGETS:
	case TCSETSF:
	case TCSETSW:
	case TCSETS:
	case TCGETA:
	case TCSETAF:
	case TCSETAW:
	case TCSETA:
	case TCXONC:
	case TCFLSH:
	case TIOCOUTQ:
	case TIOCINQ:
	case TIOCGLCKTRMIOS:
	case TIOCSLCKTRMIOS:
	case TIOCPKT:
	case TIOCGSOFTCAR:
	case TIOCSSOFTCAR:
		return -ENOIOCTLCMD;
#if 0
	case TCwhatever:
		/* do something */
		break;
#endif
	default:
		for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
			if (cmd == tty_ioctls[i].cmd)
				break;
		if (i == ARRAY_SIZE(tty_ioctls)) {
			printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
			       __func__, tty->name, cmd);
		}
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

void line_throttle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	deactivate_chan(&line->chan_list, line->driver->read_irq);
	line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	line->throttled = 0;
	chan_interrupt(&line->chan_list, &line->task, tty,
		       line->driver->read_irq);

	/*
	 * Maybe there is enough stuff pending that calling the interrupt
	 * throttles us again. In this case, line->throttled will be 1
	 * again and we shouldn't turn the interrupt back on.
	 */
	if (!line->throttled)
		reactivate_chan(&line->chan_list, line->driver->read_irq);
}

static irqreturn_t line_write_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;
	struct tty_struct *tty = line->tty;
	int err;

	/*
	 * Interrupts are disabled here because we registered the interrupt
	 * with IRQF_DISABLED (see line_setup_irq).
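	 * That is also why a plain spin_lock() (rather than
	 * spin_lock_irqsave()) is enough below.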
	 */

	spin_lock(&line->lock);
	err = flush_buffer(line);
	if (err == 0) {
		spin_unlock(&line->lock);
		return IRQ_NONE;
	} else if (err < 0) {
		line->head = line->buffer;
		line->tail = line->buffer;
	}
	spin_unlock(&line->lock);

	if (tty == NULL)
		return IRQ_NONE;

	tty_wakeup(tty);
	return IRQ_HANDLED;
}

int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
	const struct line_driver *driver = line->driver;
	int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;

	if (input)
		err = um_request_irq(driver->read_irq, fd, IRQ_READ,
				     line_interrupt, flags,
				     driver->read_irq_name, data);
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
				     line_write_interrupt, flags,
				     driver->write_irq_name, data);
	line->have_irq = 1;
	return err;
}

/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration. The checking and modification of the configuration
 * is done under a spinlock. Checking whether the device is in use is
 * line->tty->count > 1, also under the spinlock.
 *
 * tty->count serves to decide whether the device should be enabled or
 * disabled on the host. If it's equal to 1, then we are doing the
 * first open or last close. Otherwise, open and close just return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
	struct line *line = &lines[tty->index];
	int err = -ENODEV;

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	err = 0;
	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	tty->driver_data = line;
	line->tty = tty;

	err = enable_chan(line);
	if (err)
		return err;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	if (!line->sigio) {
		chan_enable_winch(&line->chan_list, tty);
		line->sigio = 1;
	}

	chan_window_size(&line->chan_list, &tty->winsize.ws_row,
			 &tty->winsize.ws_col);

	return err;

out_unlock:
	spin_unlock(&line->count_lock);
	return err;
}

static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file *filp)
{
	struct line *line = tty->driver_data;

	/*
	 * If line_open fails (and tty->driver_data is never set),
	 * tty_open will call line_close. So just return in this case.
	 */
	if (line == NULL)
		return;

	/* We ignore the error anyway! */
	flush_buffer(line);

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	line->tty = NULL;
	tty->driver_data = NULL;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}

	return;

out_unlock:
	spin_unlock(&line->count_lock);
}

void close_lines(struct line *lines, int nlines)
{
	int i;

	for (i = 0; i < nlines; i++)
		close_chan(&lines[i].chan_list, 0);
}

static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
			  char **error_out)
{
	struct line *line = &lines[n];
	int err = -EINVAL;

	spin_lock(&line->count_lock);

	if (line->tty != NULL) {
		*error_out = "Device is already open";
		goto out;
	}

	if (line->init_pri <= init_prio) {
		line->init_pri = init_prio;
		if (!strcmp(init, "none"))
			line->valid = 0;
		else {
			line->init_str = init;
			line->valid = 1;
		}
	}
	err = 0;
out:
	spin_unlock(&line->count_lock);
	return err;
}

/*
 * Common setup code for both startup command line and mconsole initialization.
 * @lines contains the array (of size @num) to modify;
 * @init is the setup string;
 * @error_out is an error string in the case of failure;
 *
 * Returns the device number that was configured, @num when all devices
 * were configured at once, or a negative error code on failure.
 */

int line_setup(struct line *lines, unsigned int num, char *init,
	       char **error_out)
{
	int i, n, err;
	char *end;

	if (*init == '=') {
		/*
		 * We said con=/ssl= instead of con#=, so we are configuring
		 * all consoles at once.
		 */
		n = -1;
	} else {
		n = simple_strtoul(init, &end, 0);
		if (*end != '=') {
			*error_out = "Couldn't parse device number";
			return -EINVAL;
		}
		init = end;
	}
	init++;

	if (n >= (signed int) num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	} else if (n >= 0) {
		err = setup_one_line(lines, n, init, INIT_ONE, error_out);
		if (err)
			return err;
	} else {
		for (i = 0; i < num; i++) {
			err = setup_one_line(lines, i, init, INIT_ALL,
					     error_out);
			if (err)
				return err;
		}
	}
	return n == -1 ? num : n;
}

int line_config(struct line *lines, unsigned int num, char *str,
		const struct chan_opts *opts, char **error_out)
{
	struct line *line;
	char *new;
	int n;

	if (*str == '=') {
		*error_out = "Can't configure all devices from mconsole";
		return -EINVAL;
	}

	new = kstrdup(str, GFP_KERNEL);
	if (new == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}
	n = line_setup(lines, num, new, error_out);
	if (n < 0)
		return n;

	line = &lines[n];
	return parse_chan_pair(line->init_str, line, n, opts, error_out);
}

int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
		    int size, char **error_out)
{
	struct line *line;
	char *end;
	int dev, n = 0;

	dev = simple_strtoul(name, &end, 0);
	if ((*end != '\0') || (end == name)) {
		*error_out = "line_get_config failed to parse device number";
		return 0;
	}

	if ((dev < 0) || (dev >= num)) {
		*error_out = "device number out of range";
		return 0;
	}

	line = &lines[dev];

	spin_lock(&line->count_lock);
	if (!line->valid)
		CONFIG_CHUNK(str, size, n, "none", 1);
	else if (line->tty == NULL)
		CONFIG_CHUNK(str, size, n, line->init_str, 1);
	else
		n = chan_config_string(&line->chan_list, str, size, error_out);
	spin_unlock(&line->count_lock);

	return n;
}

int line_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*str = end;
	*start_out = n;
	*end_out = n;
	return n;
}

int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
	int err;
	char config[sizeof("conxxxx=none\0")];

	sprintf(config, "%d=none", n);
	err = line_setup(lines, num, config, error_out);
	if (err >= 0)
		err = 0;
	return err;
}

struct tty_driver *register_lines(struct line_driver *line_driver,
				  const struct tty_operations *ops,
				  struct line *lines, int nlines)
{
	int i;
	struct tty_driver *driver = alloc_tty_driver(nlines);

	if (!driver)
		return NULL;

	driver->driver_name = line_driver->name;
	driver->name = line_driver->device_name;
	driver->major = line_driver->major;
	driver->minor_start = line_driver->minor_start;
	driver->type = line_driver->type;
	driver->subtype = line_driver->subtype;
	driver->flags = TTY_DRIVER_REAL_RAW;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, ops);

	if (tty_register_driver(driver)) {
		printk(KERN_ERR "register_lines : can't register %s driver\n",
		       line_driver->name);
		put_tty_driver(driver);
		return NULL;
	}

	for (i = 0; i < nlines; i++) {
		if (!lines[i].valid)
			tty_unregister_device(driver, i);
	}

	mconsole_register_dev(&line_driver->mc);
	return driver;
}

static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);

void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
	struct line *line;
	char *error;
	int i;

	for (i = 0; i < nlines; i++) {
		line = &lines[i];
		INIT_LIST_HEAD(&line->chan_list);

		if (line->init_str == NULL)
			continue;

		line->init_str = kstrdup(line->init_str, GFP_KERNEL);
		if (line->init_str == NULL)
			printk(KERN_ERR "lines_init - kstrdup returned NULL\n");

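		/*
		 * A parse failure only marks this line invalid; the other
		 * lines are still initialized.
		 */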
		if (parse_chan_pair(line->init_str, line, i, opts, &error)) {
			printk(KERN_ERR "parse_chan_pair failed for "
			       "device %d : %s\n", i, error);
			line->valid = 0;
		}
	}
}

struct winch {
	struct list_head list;
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	unsigned long stack;
};

static void free_winch(struct winch *winch, int free_irq_ok)
{
	list_del(&winch->list);

	if (winch->pid != -1)
		os_kill_process(winch->pid, 1);
	if (winch->fd != -1)
		os_close_file(winch->fd);
	if (winch->stack != 0)
		free_stack(winch->stack, 0);
	if (free_irq_ok)
		free_irq(WINCH_IRQ, winch);
	kfree(winch);
}

static irqreturn_t winch_interrupt(int irq, void *data)
{
	struct winch *winch = data;
	struct tty_struct *tty;
	struct line *line;
	int err;
	char c;

	if (winch->fd != -1) {
		err = generic_read(winch->fd, &c, NULL);
		if (err < 0) {
			if (err != -EAGAIN) {
				printk(KERN_ERR "winch_interrupt : "
				       "read failed, errno = %d\n", -err);
				printk(KERN_ERR "fd %d is losing SIGWINCH "
				       "support\n", winch->tty_fd);
				free_winch(winch, 0);
				return IRQ_HANDLED;
			}
			goto out;
		}
	}
	tty = winch->tty;
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {
			chan_window_size(&line->chan_list,
					 &tty->winsize.ws_row,
					 &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
	}
out:
	if (winch->fd != -1)
		reactivate_fd(winch->fd, WINCH_IRQ);
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
			unsigned long stack)
{
	struct winch *winch;

	winch = kmalloc(sizeof(*winch), GFP_KERNEL);
	if (winch == NULL) {
		printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
		goto cleanup;
	}

	*winch = ((struct winch) { .list	= LIST_HEAD_INIT(winch->list),
				   .fd		= fd,
				   .tty_fd	= tty_fd,
				   .pid		= pid,
				   .tty		= tty,
				   .stack	= stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "winch", winch) < 0) {
		printk(KERN_ERR "register_winch_irq - failed to register "
		       "IRQ\n");
		goto out_free;
	}

	spin_lock(&winch_handler_lock);
	list_add(&winch->list, &winch_handlers);
	spin_unlock(&winch_handler_lock);

	return;

out_free:
	kfree(winch);
cleanup:
	os_kill_process(pid, 1);
	os_close_file(fd);
	if (stack != 0)
		free_stack(stack, 0);
}

static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each(ele, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
			free_winch(winch, 1);
			break;
		}
	}
	spin_unlock(&winch_handler_lock);
}

static void winch_cleanup(void)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		free_winch(winch, 1);
	}

	spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);

char *add_xterm_umid(char *base)
{
	char *umid, *title;
	int len;

	umid = get_umid();
	if (*umid == '\0')
		return base;

	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
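	/* len covers base, " (", umid, ")" and the trailing '\0' */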
	title = kmalloc(len, GFP_KERNEL);
	if (title == NULL) {
		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
		return base;
	}

	snprintf(title, len, "%s (%s)", base, umid);
	return title;
}