/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/irqreturn.h"
#include "linux/kd.h"
#include "chan_kern.h"
#include "irq_kern.h"
#include "irq_user.h"
#include "kern_util.h"
#include "os.h"

#define LINE_BUFSIZE 4096

static irqreturn_t line_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;

	if (line)
		chan_interrupt(&line->chan_list, &line->task, line->tty, irq);
	return IRQ_HANDLED;
}

static void line_timer_cb(struct work_struct *work)
{
	struct line *line = container_of(work, struct line, task.work);

	if (!line->throttled)
		chan_interrupt(&line->chan_list, &line->task, line->tty,
			       line->driver->read_irq);
}

/*
 * Returns the free space inside the ring buffer of this line.
 *
 * Should be called while holding line->lock (this does not modify data).
 */
static int write_room(struct line *line)
{
	int n;

	if (line->buffer == NULL)
		return LINE_BUFSIZE - 1;

	/* This is for the case where the buffer is wrapped! */
	n = line->head - line->tail;

	if (n <= 0)
		n += LINE_BUFSIZE; /* The other case */
	return n - 1;
}

int line_write_room(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int room;

	spin_lock_irqsave(&line->lock, flags);
	room = write_room(line);
	spin_unlock_irqrestore(&line->lock, flags);

	return room;
}

int line_chars_in_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&line->lock, flags);
	/* write_room subtracts 1 for the needed NULL, so we re-add it. */
	ret = LINE_BUFSIZE - (write_room(line) + 1);
	spin_unlock_irqrestore(&line->lock, flags);

	return ret;
}

/*
 * This copies the content of buf into the circular buffer associated with
 * this line.
 * The return value is the number of characters actually copied, i.e. the ones
 * for which there was space: this function is not supposed to ever flush out
 * the circular buffer.
 *
 * Must be called while holding line->lock!
 */
static int buffer_data(struct line *line, const char *buf, int len)
{
	int end, room;

	if (line->buffer == NULL) {
		line->buffer = kmalloc(LINE_BUFSIZE, GFP_ATOMIC);
		if (line->buffer == NULL) {
			printk(KERN_ERR "buffer_data - atomic allocation "
			       "failed\n");
			return 0;
		}
		line->head = line->buffer;
		line->tail = line->buffer;
	}

	room = write_room(line);
	len = (len > room) ? room : len;

	end = line->buffer + LINE_BUFSIZE - line->tail;

	if (len < end) {
		memcpy(line->tail, buf, len);
		line->tail += len;
	}
	else {
		/* The circular buffer is wrapping */
		memcpy(line->tail, buf, end);
		buf += end;
		memcpy(line->buffer, buf, len - end);
		line->tail = line->buffer + len - end;
	}

	return len;
}
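
/*
 * Worked example of the ring buffer arithmetic above (the numbers are
 * illustrative only): with LINE_BUFSIZE == 4096, head == buffer + 100 and
 * tail == buffer + 4090, the queued data runs from head up to tail, so
 * write_room() returns (100 - 4090 + 4096) - 1 == 105 and
 * line_chars_in_buffer() reports 3990.  If buffer_data() is then asked to
 * queue 10 more bytes, it copies 6 bytes up to the end of the buffer, wraps,
 * copies the remaining 4 bytes to the start, and leaves tail == buffer + 4.
 */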

/*
 * Flushes the ring buffer to the output channels. That is, write_chan is
 * called, passing it line->head as buffer, and an appropriate count.
 *
 * Returns 1 when the buffer is empty on exit,
 * 0 when it is not,
 * and -errno when an error occurred.
 *
 * Must be called while holding line->lock!
 */
static int flush_buffer(struct line *line)
{
	int n, count;

	if ((line->buffer == NULL) || (line->head == line->tail))
		return 1;

	if (line->tail < line->head) {
		/* line->buffer + LINE_BUFSIZE is the end of the buffer! */
		count = line->buffer + LINE_BUFSIZE - line->head;

		n = write_chan(&line->chan_list, line->head, count,
			       line->driver->write_irq);
		if (n < 0)
			return n;
		if (n == count) {
			/*
			 * We have flushed from ->head to buffer end, now we
			 * must flush only from the beginning to ->tail.
			 */
			line->head = line->buffer;
		} else {
			line->head += n;
			return 0;
		}
	}

	count = line->tail - line->head;
	n = write_chan(&line->chan_list, line->head, count,
		       line->driver->write_irq);

	if (n < 0)
		return n;

	line->head += n;
	return line->head == line->tail;
}

void line_flush_buffer(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&line->lock, flags);
	err = flush_buffer(line);
	spin_unlock_irqrestore(&line->lock, flags);
}

/*
 * We map both ->flush_chars and ->put_char (which go in pair) onto
 * ->flush_buffer and ->write. Hope it's not that bad.
 */
void line_flush_chars(struct tty_struct *tty)
{
	line_flush_buffer(tty);
}

int line_put_char(struct tty_struct *tty, unsigned char ch)
{
	return line_write(tty, &ch, sizeof(ch));
}

int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
{
	struct line *line = tty->driver_data;
	unsigned long flags;
	int n, ret = 0;

	spin_lock_irqsave(&line->lock, flags);
	if (line->head != line->tail)
		ret = buffer_data(line, buf, len);
	else {
		n = write_chan(&line->chan_list, buf, len,
			       line->driver->write_irq);
		if (n < 0) {
			ret = n;
			goto out_up;
		}

		len -= n;
		ret += n;
		if (len > 0)
			ret += buffer_data(line, buf + n, len);
	}
out_up:
	spin_unlock_irqrestore(&line->lock, flags);
	return ret;
}

void line_set_termios(struct tty_struct *tty, struct ktermios * old)
{
	/* nothing */
}
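
/*
 * For reference, this is roughly how the UML console drivers (see
 * arch/um/drivers/ssl.c and stdio_console.c) plug the line_* hooks in this
 * file into the tty layer; the names below are illustrative, not part of
 * this file:
 *
 *	static int my_open(struct tty_struct *tty, struct file *filp)
 *	{
 *		return line_open(my_lines, tty);
 *	}
 *
 *	static const struct tty_operations my_ops = {
 *		.open			= my_open,
 *		.close			= line_close,
 *		.write			= line_write,
 *		.put_char		= line_put_char,
 *		.write_room		= line_write_room,
 *		.chars_in_buffer	= line_chars_in_buffer,
 *		.flush_buffer		= line_flush_buffer,
 *		.flush_chars		= line_flush_chars,
 *		.set_termios		= line_set_termios,
 *		.ioctl			= line_ioctl,
 *		.throttle		= line_throttle,
 *		.unthrottle		= line_unthrottle,
 *	};
 *
 * At init time the driver hands such a table to register_lines() and then
 * parses its channel configuration with lines_init(), both defined below.
 */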

static const struct {
	int cmd;
	char *level;
	char *name;
} tty_ioctls[] = {
	/* don't print these, they flood the log ... */
	{ TCGETS,      NULL,       "TCGETS" },
	{ TCSETS,      NULL,       "TCSETS" },
	{ TCSETSW,     NULL,       "TCSETSW" },
	{ TCFLSH,      NULL,       "TCFLSH" },
	{ TCSBRK,      NULL,       "TCSBRK" },

	/* general tty stuff */
	{ TCSETSF,     KERN_DEBUG, "TCSETSF" },
	{ TCGETA,      KERN_DEBUG, "TCGETA" },
	{ TIOCMGET,    KERN_DEBUG, "TIOCMGET" },
	{ TCSBRKP,     KERN_DEBUG, "TCSBRKP" },
	{ TIOCMSET,    KERN_DEBUG, "TIOCMSET" },

	/* linux-specific ones */
	{ TIOCLINUX,   KERN_INFO,  "TIOCLINUX" },
	{ KDGKBMODE,   KERN_INFO,  "KDGKBMODE" },
	{ KDGKBTYPE,   KERN_INFO,  "KDGKBTYPE" },
	{ KDSIGACCEPT, KERN_INFO,  "KDSIGACCEPT" },
};

int line_ioctl(struct tty_struct *tty, struct file * file,
	       unsigned int cmd, unsigned long arg)
{
	int ret;
	int i;

	ret = 0;
	switch(cmd) {
#ifdef TIOCGETP
	case TIOCGETP:
	case TIOCSETP:
	case TIOCSETN:
#endif
#ifdef TIOCGETC
	case TIOCGETC:
	case TIOCSETC:
#endif
#ifdef TIOCGLTC
	case TIOCGLTC:
	case TIOCSLTC:
#endif
	case TCGETS:
	case TCSETSF:
	case TCSETSW:
	case TCSETS:
	case TCGETA:
	case TCSETAF:
	case TCSETAW:
	case TCSETA:
	case TCXONC:
	case TCFLSH:
	case TIOCOUTQ:
	case TIOCINQ:
	case TIOCGLCKTRMIOS:
	case TIOCSLCKTRMIOS:
	case TIOCPKT:
	case TIOCGSOFTCAR:
	case TIOCSSOFTCAR:
		return -ENOIOCTLCMD;
#if 0
	case TCwhatever:
		/* do something */
		break;
#endif
	default:
		for (i = 0; i < ARRAY_SIZE(tty_ioctls); i++)
			if (cmd == tty_ioctls[i].cmd)
				break;
		if (i == ARRAY_SIZE(tty_ioctls)) {
			printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
			       __func__, tty->name, cmd);
		}
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}

void line_throttle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	deactivate_chan(&line->chan_list, line->driver->read_irq);
	line->throttled = 1;
}

void line_unthrottle(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	line->throttled = 0;
	chan_interrupt(&line->chan_list, &line->task, tty,
		       line->driver->read_irq);

	/*
	 * Maybe there is enough stuff pending that calling the interrupt
	 * throttles us again. In this case, line->throttled will be 1
	 * again and we shouldn't turn the interrupt back on.
	 */
	if (!line->throttled)
		reactivate_chan(&line->chan_list, line->driver->read_irq);
}

static irqreturn_t line_write_interrupt(int irq, void *data)
{
	struct chan *chan = data;
	struct line *line = chan->line;
	struct tty_struct *tty = line->tty;
	int err;

	/*
	 * Interrupts are disabled here because we registered the interrupt with
	 * IRQF_DISABLED (see line_setup_irq).
	 */

	spin_lock(&line->lock);
	err = flush_buffer(line);
	if (err == 0) {
		spin_unlock(&line->lock);
		return IRQ_NONE;
	} else if (err < 0) {
		line->head = line->buffer;
		line->tail = line->buffer;
	}
	spin_unlock(&line->lock);

	if (tty == NULL)
		return IRQ_NONE;

	tty_wakeup(tty);
	return IRQ_HANDLED;
}

int line_setup_irq(int fd, int input, int output, struct line *line, void *data)
{
	const struct line_driver *driver = line->driver;
	int err = 0, flags = IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM;

	if (input)
		err = um_request_irq(driver->read_irq, fd, IRQ_READ,
				     line_interrupt, flags,
				     driver->read_irq_name, data);
	if (err)
		return err;
	if (output)
		err = um_request_irq(driver->write_irq, fd, IRQ_WRITE,
				     line_write_interrupt, flags,
				     driver->write_irq_name, data);
	line->have_irq = 1;
	return err;
}

/*
 * Normally, a driver like this can rely mostly on the tty layer
 * locking, particularly when it comes to the driver structure.
 * However, in this case, mconsole requests can come in "from the
 * side", and race with opens and closes.
 *
 * mconsole config requests will want to be sure the device isn't in
 * use, and get_config, open, and close will want a stable
 * configuration. The checking and modification of the configuration
 * is done under a spinlock. Checking whether the device is in use is
 * line->tty->count > 1, also under the spinlock.
 *
 * tty->count serves to decide whether the device should be enabled or
 * disabled on the host. If it's equal to 1, then we are doing the
 * first open or last close. Otherwise, open and close just return.
 */

int line_open(struct line *lines, struct tty_struct *tty)
{
	struct line *line = &lines[tty->index];
	int err = -ENODEV;

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	err = 0;
	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	tty->driver_data = line;
	line->tty = tty;

	err = enable_chan(line);
	if (err)
		return err;

	INIT_DELAYED_WORK(&line->task, line_timer_cb);

	if (!line->sigio) {
		chan_enable_winch(&line->chan_list, tty);
		line->sigio = 1;
	}

	chan_window_size(&line->chan_list, &tty->winsize.ws_row,
			 &tty->winsize.ws_col);

	return err;

out_unlock:
	spin_unlock(&line->count_lock);
	return err;
}

static void unregister_winch(struct tty_struct *tty);

void line_close(struct tty_struct *tty, struct file * filp)
{
	struct line *line = tty->driver_data;

	/*
	 * If line_open fails (and tty->driver_data is never set),
	 * tty_open will call line_close. So just return in this case.
	 */
	if (line == NULL)
		return;

	/* We ignore the error anyway! */
	flush_buffer(line);

	spin_lock(&line->count_lock);
	if (!line->valid)
		goto out_unlock;

	if (tty->count > 1)
		goto out_unlock;

	spin_unlock(&line->count_lock);

	line->tty = NULL;
	tty->driver_data = NULL;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}

	return;

out_unlock:
	spin_unlock(&line->count_lock);
}

void close_lines(struct line *lines, int nlines)
{
	int i;

	for(i = 0; i < nlines; i++)
		close_chan(&lines[i].chan_list, 0);
}

static int setup_one_line(struct line *lines, int n, char *init, int init_prio,
			  char **error_out)
{
	struct line *line = &lines[n];
	int err = -EINVAL;

	spin_lock(&line->count_lock);

	if (line->tty != NULL) {
		*error_out = "Device is already open";
		goto out;
	}

	if (line->init_pri <= init_prio) {
		line->init_pri = init_prio;
		if (!strcmp(init, "none"))
			line->valid = 0;
		else {
			line->init_str = init;
			line->valid = 1;
		}
	}
	err = 0;
out:
	spin_unlock(&line->count_lock);
	return err;
}

/*
 * Common setup code for both startup command line and mconsole initialization.
 * @lines contains the array (of size @num) to modify;
 * @init is the setup string;
 * @error_out is an error string in the case of failure;
 *
 * Returns the line number that was configured, @num when all lines were
 * configured at once, or a negative errno on failure.
 */

int line_setup(struct line *lines, unsigned int num, char *init,
	       char **error_out)
{
	int i, n, err;
	char *end;

	if (*init == '=') {
		/*
		 * We said con=/ssl= instead of con#=, so we are configuring all
		 * consoles at once.
		 */
		n = -1;
	}
	else {
		n = simple_strtoul(init, &end, 0);
		if (*end != '=') {
			*error_out = "Couldn't parse device number";
			return -EINVAL;
		}
		init = end;
	}
	init++;

	if (n >= (signed int) num) {
		*error_out = "Device number out of range";
		return -EINVAL;
	}
	else if (n >= 0) {
		err = setup_one_line(lines, n, init, INIT_ONE, error_out);
		if (err)
			return err;
	}
	else {
		for(i = 0; i < num; i++) {
			err = setup_one_line(lines, i, init, INIT_ALL,
					     error_out);
			if (err)
				return err;
		}
	}
	return n == -1 ? num : n;
}
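
/*
 * Illustrative examples of what line_setup() parses: an init string of
 * "1=pty" configures line 1 only and returns 1, "1=none" marks line 1
 * invalid, and "=pty" (what a bare con=/ssl= on the command line boils
 * down to) configures every line and returns num.
 */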

int line_config(struct line *lines, unsigned int num, char *str,
		const struct chan_opts *opts, char **error_out)
{
	struct line *line;
	char *new;
	int n;

	if (*str == '=') {
		*error_out = "Can't configure all devices from mconsole";
		return -EINVAL;
	}

	new = kstrdup(str, GFP_KERNEL);
	if (new == NULL) {
		*error_out = "Failed to allocate memory";
		return -ENOMEM;
	}
	n = line_setup(lines, num, new, error_out);
	if (n < 0)
		return n;

	line = &lines[n];
	return parse_chan_pair(line->init_str, line, n, opts, error_out);
}

int line_get_config(char *name, struct line *lines, unsigned int num, char *str,
		    int size, char **error_out)
{
	struct line *line;
	char *end;
	int dev, n = 0;

	dev = simple_strtoul(name, &end, 0);
	if ((*end != '\0') || (end == name)) {
		*error_out = "line_get_config failed to parse device number";
		return 0;
	}

	if ((dev < 0) || (dev >= num)) {
		*error_out = "device number out of range";
		return 0;
	}

	line = &lines[dev];

	spin_lock(&line->count_lock);
	if (!line->valid)
		CONFIG_CHUNK(str, size, n, "none", 1);
	else if (line->tty == NULL)
		CONFIG_CHUNK(str, size, n, line->init_str, 1);
	else n = chan_config_string(&line->chan_list, str, size, error_out);
	spin_unlock(&line->count_lock);

	return n;
}

int line_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if ((*end != '\0') || (end == *str))
		return -1;

	*str = end;
	*start_out = n;
	*end_out = n;
	return n;
}

int line_remove(struct line *lines, unsigned int num, int n, char **error_out)
{
	int err;
	char config[sizeof("conxxxx=none\0")];

	sprintf(config, "%d=none", n);
	err = line_setup(lines, num, config, error_out);
	if (err >= 0)
		err = 0;
	return err;
}

struct tty_driver *register_lines(struct line_driver *line_driver,
				  const struct tty_operations *ops,
				  struct line *lines, int nlines)
{
	int i;
	struct tty_driver *driver = alloc_tty_driver(nlines);

	if (!driver)
		return NULL;

	driver->driver_name = line_driver->name;
	driver->name = line_driver->device_name;
	driver->major = line_driver->major;
	driver->minor_start = line_driver->minor_start;
	driver->type = line_driver->type;
	driver->subtype = line_driver->subtype;
	driver->flags = TTY_DRIVER_REAL_RAW;
	driver->init_termios = tty_std_termios;
	tty_set_operations(driver, ops);

	if (tty_register_driver(driver)) {
		printk(KERN_ERR "register_lines : can't register %s driver\n",
		       line_driver->name);
		put_tty_driver(driver);
		return NULL;
	}

	for(i = 0; i < nlines; i++) {
		if (!lines[i].valid)
			tty_unregister_device(driver, i);
	}

	mconsole_register_dev(&line_driver->mc);
	return driver;
}

static DEFINE_SPINLOCK(winch_handler_lock);
static LIST_HEAD(winch_handlers);

void lines_init(struct line *lines, int nlines, struct chan_opts *opts)
{
	struct line *line;
	char *error;
	int i;

	for(i = 0; i < nlines; i++) {
		line = &lines[i];
		INIT_LIST_HEAD(&line->chan_list);

		if (line->init_str == NULL)
			continue;

		line->init_str = kstrdup(line->init_str, GFP_KERNEL);
		if (line->init_str == NULL)
			printk(KERN_ERR "lines_init - kstrdup returned NULL\n");

		if (parse_chan_pair(line->init_str, line, i, opts, &error)) {
			printk(KERN_ERR "parse_chan_pair failed for "
			       "device %d : %s\n", i, error);
			line->valid = 0;
		}
	}
}

struct winch {
	struct list_head list;
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	unsigned long stack;
};

static void free_winch(struct winch *winch, int free_irq_ok)
{
	list_del(&winch->list);

	if (winch->pid != -1)
		os_kill_process(winch->pid, 1);
	if (winch->fd != -1)
		os_close_file(winch->fd);
	if (winch->stack != 0)
		free_stack(winch->stack, 0);
	if (free_irq_ok)
		free_irq(WINCH_IRQ, winch);
	kfree(winch);
}

static irqreturn_t winch_interrupt(int irq, void *data)
{
	struct winch *winch = data;
	struct tty_struct *tty;
	struct line *line;
	int err;
	char c;

	if (winch->fd != -1) {
		err = generic_read(winch->fd, &c, NULL);
		if (err < 0) {
			if (err != -EAGAIN) {
				printk(KERN_ERR "winch_interrupt : "
				       "read failed, errno = %d\n", -err);
				printk(KERN_ERR "fd %d is losing SIGWINCH "
				       "support\n", winch->tty_fd);
				free_winch(winch, 0);
				return IRQ_HANDLED;
			}
			goto out;
		}
	}
	tty = winch->tty;
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {
			chan_window_size(&line->chan_list, &tty->winsize.ws_row,
					 &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
	}
out:
	if (winch->fd != -1)
		reactivate_fd(winch->fd, WINCH_IRQ);
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
			unsigned long stack)
{
	struct winch *winch;

	winch = kmalloc(sizeof(*winch), GFP_KERNEL);
	if (winch == NULL) {
		printk(KERN_ERR "register_winch_irq - kmalloc failed\n");
		goto cleanup;
	}

	*winch = ((struct winch) { .list	= LIST_HEAD_INIT(winch->list),
				   .fd		= fd,
				   .tty_fd	= tty_fd,
				   .pid		= pid,
				   .tty		= tty,
				   .stack	= stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
			   IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			   "winch", winch) < 0) {
		printk(KERN_ERR "register_winch_irq - failed to register "
		       "IRQ\n");
		goto out_free;
	}

	spin_lock(&winch_handler_lock);
	list_add(&winch->list, &winch_handlers);
	spin_unlock(&winch_handler_lock);

	return;

out_free:
	kfree(winch);
cleanup:
	os_kill_process(pid, 1);
	os_close_file(fd);
	if (stack != 0)
		free_stack(stack, 0);
}

static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each(ele, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
			free_winch(winch, 1);
			break;
		}
	}
	spin_unlock(&winch_handler_lock);
}

static void winch_cleanup(void)
{
	struct list_head *ele, *next;
	struct winch *winch;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		free_winch(winch, 1);
	}

	spin_unlock(&winch_handler_lock);
}
__uml_exitcall(winch_cleanup);
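
/*
 * Tack the umid onto an xterm title so that several UMLs can be told apart.
 * For example (illustrative values), with a umid of "uml1",
 * add_xterm_umid("xterm") returns a freshly allocated "xterm (uml1)"; if no
 * umid is set, base is returned unchanged.
 */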
char *add_xterm_umid(char *base)
{
	char *umid, *title;
	int len;

	umid = get_umid();
	if (*umid == '\0')
		return base;

	len = strlen(base) + strlen(" ()") + strlen(umid) + 1;
	title = kmalloc(len, GFP_KERNEL);
	if (title == NULL) {
		printk(KERN_ERR "Failed to allocate buffer for xterm title\n");
		return base;
	}

	snprintf(title, len, "%s (%s)", base, umid);
	return title;
}