/************************************************************************
 * Copyright 2003 Digi International (www.digi.com)
 *
 * Copyright (C) 2004 IBM Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * Contact Information:
 * Scott H Kilau <Scott_Kilau@digi.com>
 * Ananda Venkatarman <mansarov@us.ibm.com>
 * Modifications:
 * 01/19/06: changed jsm_input routine to use the dynamically allocated
 *           tty_buffer changes. Contributors: Scott Kilau and Ananda V.
 ***********************************************************************/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>	/* For udelay */
#include <linux/pci.h>
#include <linux/slab.h>

#include "jsm.h"

static DECLARE_BITMAP(linemap, MAXLINES);

static void jsm_carrier(struct jsm_channel *ch);

static inline int jsm_get_mstat(struct jsm_channel *ch)
{
        unsigned char mstat;
        unsigned result;

        jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");

        mstat = (ch->ch_mostat | ch->ch_mistat);

        result = 0;

        if (mstat & UART_MCR_DTR)
                result |= TIOCM_DTR;
        if (mstat & UART_MCR_RTS)
                result |= TIOCM_RTS;
        if (mstat & UART_MSR_CTS)
                result |= TIOCM_CTS;
        if (mstat & UART_MSR_DSR)
                result |= TIOCM_DSR;
        if (mstat & UART_MSR_RI)
                result |= TIOCM_RI;
        if (mstat & UART_MSR_DCD)
                result |= TIOCM_CD;

        jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
        return result;
}

static unsigned int jsm_tty_tx_empty(struct uart_port *port)
{
        return TIOCSER_TEMT;
}

/*
 * Return modem signals to ld.
 */
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
        int result;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

        result = jsm_get_mstat(channel);

        if (result < 0)
                return -ENXIO;

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");

        return result;
}

/*
 * jsm_tty_set_mctrl()
 *
 * Set modem signals, called by ld.
 */
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

        if (mctrl & TIOCM_RTS)
                channel->ch_mostat |= UART_MCR_RTS;
        else
                channel->ch_mostat &= ~UART_MCR_RTS;

        if (mctrl & TIOCM_DTR)
                channel->ch_mostat |= UART_MCR_DTR;
        else
                channel->ch_mostat &= ~UART_MCR_DTR;

        channel->ch_bd->bd_ops->assert_modem_signals(channel);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
        udelay(10);
}
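/*
 * Note on the modem-signal bookkeeping above: the driver keeps shadow copies
 * of the modem state per channel.  ch_mostat caches the MCR output bits we
 * last requested (DTR/RTS), and ch_mistat holds the MSR input bits most
 * recently reported by the board (CTS/DSR/RI/DCD).  jsm_tty_set_mctrl() only
 * updates the shadow and then asks the board-specific assert_modem_signals()
 * hook to push it to the UART, while jsm_get_mstat() merges both shadows into
 * a single TIOCM_* bitmask for the serial core.
 */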
/*
 * jsm_tty_write()
 *
 * Take data from the user or kernel and send it out to the FEP.
 * In here exists all the Transparent Print magic as well.
 */
static void jsm_tty_write(struct uart_port *port)
{
        struct jsm_channel *channel;

        channel = container_of(port, struct jsm_channel, uart_port);
        channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}

static void jsm_tty_start_tx(struct uart_port *port)
{
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

        channel->ch_flags &= ~(CH_STOP);
        jsm_tty_write(port);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_stop_tx(struct uart_port *port)
{
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

        channel->ch_flags |= (CH_STOP);

        jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
        unsigned long lock_flags;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);
        struct ktermios *termios;

        spin_lock_irqsave(&port->lock, lock_flags);
        termios = &port->state->port.tty->termios;
        if (ch == termios->c_cc[VSTART])
                channel->ch_bd->bd_ops->send_start_character(channel);

        if (ch == termios->c_cc[VSTOP])
                channel->ch_bd->bd_ops->send_stop_character(channel);
        spin_unlock_irqrestore(&port->lock, lock_flags);
}

static void jsm_tty_stop_rx(struct uart_port *port)
{
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        channel->ch_bd->bd_ops->disable_receiver(channel);
}

static void jsm_tty_break(struct uart_port *port, int break_state)
{
        unsigned long lock_flags;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        spin_lock_irqsave(&port->lock, lock_flags);
        if (break_state == -1)
                channel->ch_bd->bd_ops->send_break(channel);
        else
                channel->ch_bd->bd_ops->clear_break(channel);

        spin_unlock_irqrestore(&port->lock, lock_flags);
}
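/*
 * The startup path below (the serial core's ->startup hook) does, in order:
 * allocate the per-channel read and error queues on first open, reset the
 * queue indices, flush the UART, snapshot the termios flags and start/stop
 * characters into the channel, let the board-specific uart_init() and param()
 * hooks program the hardware, and finally re-evaluate carrier state via
 * jsm_carrier().
 */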
static int jsm_tty_open(struct uart_port *port)
{
        struct jsm_board *brd;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);
        struct ktermios *termios;

        /* Get board pointer from our array of majors we have allocated */
        brd = channel->ch_bd;

        /*
         * Allocate channel buffers for read/write/error.
         * Set flag, so we don't get trounced on.
         */
        channel->ch_flags |= (CH_OPENING);

        /* Drop locks, as malloc with GFP_KERNEL can sleep */

        if (!channel->ch_rqueue) {
                channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
                if (!channel->ch_rqueue) {
                        jsm_dbg(INIT, &channel->ch_bd->pci_dev,
                                "unable to allocate read queue buf\n");
                        return -ENOMEM;
                }
        }
        if (!channel->ch_equeue) {
                channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
                if (!channel->ch_equeue) {
                        jsm_dbg(INIT, &channel->ch_bd->pci_dev,
                                "unable to allocate error queue buf\n");
                        return -ENOMEM;
                }
        }

        channel->ch_flags &= ~(CH_OPENING);

        /*
         * Initialize if neither terminal is open.
         */
        jsm_dbg(OPEN, &channel->ch_bd->pci_dev,
                "jsm_open: initializing channel in open...\n");

        /*
         * Flush input queues.
         */
        channel->ch_r_head = channel->ch_r_tail = 0;
        channel->ch_e_head = channel->ch_e_tail = 0;

        brd->bd_ops->flush_uart_write(channel);
        brd->bd_ops->flush_uart_read(channel);

        channel->ch_flags = 0;
        channel->ch_cached_lsr = 0;
        channel->ch_stops_sent = 0;

        termios = &port->state->port.tty->termios;
        channel->ch_c_cflag = termios->c_cflag;
        channel->ch_c_iflag = termios->c_iflag;
        channel->ch_c_oflag = termios->c_oflag;
        channel->ch_c_lflag = termios->c_lflag;
        channel->ch_startc = termios->c_cc[VSTART];
        channel->ch_stopc = termios->c_cc[VSTOP];

        /* Tell UART to init itself */
        brd->bd_ops->uart_init(channel);

        /*
         * Run param in case we changed anything
         */
        brd->bd_ops->param(channel);

        jsm_carrier(channel);

        channel->ch_open_count++;

        jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
        return 0;
}

static void jsm_tty_close(struct uart_port *port)
{
        struct jsm_board *bd;
        struct ktermios *ts;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");

        bd = channel->ch_bd;
        ts = &port->state->port.tty->termios;

        channel->ch_flags &= ~(CH_STOPI);

        channel->ch_open_count--;

        /*
         * If we have HUPCL set, lower DTR and RTS
         */
        if (channel->ch_c_cflag & HUPCL) {
                jsm_dbg(CLOSE, &channel->ch_bd->pci_dev,
                        "Close. HUPCL set, dropping DTR/RTS\n");

                /* Drop RTS/DTR */
                channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
                bd->bd_ops->assert_modem_signals(channel);
        }

        /* Turn off UART interrupts for this port */
        channel->ch_bd->bd_ops->uart_off(channel);

        jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_set_termios(struct uart_port *port,
                                struct ktermios *termios,
                                struct ktermios *old_termios)
{
        unsigned long lock_flags;
        struct jsm_channel *channel =
                container_of(port, struct jsm_channel, uart_port);

        spin_lock_irqsave(&port->lock, lock_flags);
        channel->ch_c_cflag = termios->c_cflag;
        channel->ch_c_iflag = termios->c_iflag;
        channel->ch_c_oflag = termios->c_oflag;
        channel->ch_c_lflag = termios->c_lflag;
        channel->ch_startc = termios->c_cc[VSTART];
        channel->ch_stopc = termios->c_cc[VSTOP];

        channel->ch_bd->bd_ops->param(channel);
        jsm_carrier(channel);
        spin_unlock_irqrestore(&port->lock, lock_flags);
}

static const char *jsm_tty_type(struct uart_port *port)
{
        return "jsm";
}

static void jsm_tty_release_port(struct uart_port *port)
{
}

static int jsm_tty_request_port(struct uart_port *port)
{
        return 0;
}

static void jsm_config_port(struct uart_port *port, int flags)
{
        port->type = PORT_JSM;
}

static struct uart_ops jsm_ops = {
        .tx_empty       = jsm_tty_tx_empty,
        .set_mctrl      = jsm_tty_set_mctrl,
        .get_mctrl      = jsm_tty_get_mctrl,
        .stop_tx        = jsm_tty_stop_tx,
        .start_tx       = jsm_tty_start_tx,
        .send_xchar     = jsm_tty_send_xchar,
        .stop_rx        = jsm_tty_stop_rx,
        .break_ctl      = jsm_tty_break,
        .startup        = jsm_tty_open,
        .shutdown       = jsm_tty_close,
        .set_termios    = jsm_tty_set_termios,
        .type           = jsm_tty_type,
        .release_port   = jsm_tty_release_port,
        .request_port   = jsm_tty_request_port,
        .config_port    = jsm_config_port,
};
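/*
 * Most of the callbacks above recover the driver-private channel with
 * container_of(port, struct jsm_channel, uart_port); each jsm_channel embeds
 * its struct uart_port, so no separate port-to-channel lookup table is needed.
 */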
/*
 * jsm_tty_init()
 *
 * Init the tty subsystem. Called once per board after the board has been
 * downloaded and initialized.
 */
int jsm_tty_init(struct jsm_board *brd)
{
        int i;
        void __iomem *vaddr;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_dbg(INIT, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /*
         * Allocate channel memory that might not have been allocated
         * when the driver was first loaded.
         */
        for (i = 0; i < brd->nasync; i++) {
                if (!brd->channels[i]) {

                        /*
                         * Okay to malloc with GFP_KERNEL, we are not at
                         * interrupt context, and there are no locks held.
                         */
                        brd->channels[i] = kzalloc(sizeof(struct jsm_channel),
                                                   GFP_KERNEL);
                        if (!brd->channels[i]) {
                                jsm_dbg(CORE, &brd->pci_dev,
                                        "%s:%d Unable to allocate memory for channel struct\n",
                                        __FILE__, __LINE__);
                        }
                }
        }

        ch = brd->channels[0];
        vaddr = brd->re_map_membase;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

                if (!brd->channels[i])
                        continue;

                spin_lock_init(&ch->ch_lock);

                if (brd->bd_uart_offset == 0x200)
                        ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
                else
                        ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);

                ch->ch_bd = brd;
                ch->ch_portnum = i;

                /* .25 second delay */
                ch->ch_close_delay = 250;

                init_waitqueue_head(&ch->ch_flags_wait);
        }

        jsm_dbg(INIT, &brd->pci_dev, "finish\n");
        return 0;
}

int jsm_uart_port_init(struct jsm_board *brd)
{
        int i, rc;
        unsigned int line;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_dbg(INIT, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

                if (!brd->channels[i])
                        continue;

                brd->channels[i]->uart_port.irq = brd->irq;
                brd->channels[i]->uart_port.uartclk = 14745600;
                brd->channels[i]->uart_port.type = PORT_JSM;
                brd->channels[i]->uart_port.iotype = UPIO_MEM;
                brd->channels[i]->uart_port.membase = brd->re_map_membase;
                brd->channels[i]->uart_port.fifosize = 16;
                brd->channels[i]->uart_port.ops = &jsm_ops;

                line = find_first_zero_bit(linemap, MAXLINES);
                if (line >= MAXLINES) {
                        printk(KERN_INFO "jsm: linemap is full, adding device failed\n");
                        continue;
                } else
                        set_bit(line, linemap);
                brd->channels[i]->uart_port.line = line;

                rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
                if (rc) {
                        printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
                        return rc;
                } else
                        printk(KERN_INFO "jsm: Port %d added\n", i);
        }

        jsm_dbg(INIT, &brd->pci_dev, "finish\n");
        return 0;
}
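/*
 * jsm_remove_uart_port() undoes jsm_uart_port_init(): each registered port is
 * removed from the serial core and its line number is returned to the shared
 * linemap bitmap so it can be handed out again.
 */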
int jsm_remove_uart_port(struct jsm_board *brd)
{
        int i;
        struct jsm_channel *ch;

        if (!brd)
                return -ENXIO;

        jsm_dbg(INIT, &brd->pci_dev, "start\n");

        /*
         * Initialize board structure elements.
         */

        brd->nasync = brd->maxports;

        /* Set up channel variables */
        for (i = 0; i < brd->nasync; i++) {

                if (!brd->channels[i])
                        continue;

                ch = brd->channels[i];

                clear_bit(ch->uart_port.line, linemap);
                uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
        }

        jsm_dbg(INIT, &brd->pci_dev, "finish\n");
        return 0;
}
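/*
 * jsm_input() drains the channel's receive ring into the tty flip buffer.
 * ch_rqueue holds received data and ch_equeue holds the matching per-byte
 * error (LSR) flags, indexed in step with ch_rqueue.  Assuming RQUEUESIZE is
 * a power of two (RQUEUEMASK is used as a bitmask below), "(head - tail) &
 * rmask" gives the pending byte count even across a wrap: for example
 * head = 3 and tail = RQUEUESIZE - 2 yields 5 pending bytes.
 */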
void jsm_input(struct jsm_channel *ch)
{
        struct jsm_board *bd;
        struct tty_struct *tp;
        struct tty_port *port;
        u32 rmask;
        u16 head;
        u16 tail;
        int data_len;
        unsigned long lock_flags;
        int len = 0;
        int s = 0;
        int i = 0;

        if (!ch)
                return;

        jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

        port = &ch->uart_port.state->port;
        tp = port->tty;

        bd = ch->ch_bd;
        if (!bd)
                return;

        spin_lock_irqsave(&ch->ch_lock, lock_flags);

        /*
         * Figure the number of characters in the buffer.
         * Exit immediately if none.
         */

        rmask = RQUEUEMASK;

        head = ch->ch_r_head & rmask;
        tail = ch->ch_r_tail & rmask;

        data_len = (head - tail) & rmask;
        if (data_len == 0) {
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                return;
        }

        jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

        /*
         * If the device is not open, or CREAD is off, flush
         * input data and return immediately.
         */
        if (!tp || !C_CREAD(tp)) {

                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                        "input. dropping %d bytes on port %d...\n",
                        data_len, ch->ch_portnum);
                ch->ch_r_head = tail;

                /* Force queue flow control to be released, if needed */
                jsm_check_queue_flow_control(ch);

                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                return;
        }

        /*
         * If we are throttled, simply don't read any data.
         */
        if (ch->ch_flags & CH_STOPI) {
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                        "Port %d throttled, not reading any data. head: %x tail: %x\n",
                        ch->ch_portnum, head, tail);
                return;
        }

        jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");

        len = tty_buffer_request_room(port, data_len);

        /*
         * len now contains the most amount of data we can copy,
         * bounded either by the flip buffer size or the amount
         * of data the card actually has pending...
         */
        while (len) {
                s = ((head >= tail) ? head : RQUEUESIZE) - tail;
                s = min(s, len);

                if (s <= 0)
                        break;

                /*
                 * If conditions are such that ld needs to see all
                 * UART errors, we will have to walk each character
                 * and error byte and send them to the buffer one at
                 * a time.
                 */
                if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
                        for (i = 0; i < s; i++) {
                                /*
                                 * Give the Linux ld the flags in the
                                 * format it likes.
                                 */
                                if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
                                        tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
                                else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
                                        tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
                                else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
                                        tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
                                else
                                        tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
                        }
                } else {
                        tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
                }

                tail += s;
                len -= s;
                /* Flip queue if needed */
                tail &= rmask;
        }

        ch->ch_r_tail = tail & rmask;
        ch->ch_e_tail = tail & rmask;
        jsm_check_queue_flow_control(ch);
        spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

        /* Tell the tty layer it's okay to "eat" the data now */
        tty_flip_buffer_push(port);

        jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}

static void jsm_carrier(struct jsm_channel *ch)
{
        struct jsm_board *bd;

        int virt_carrier = 0;
        int phys_carrier = 0;

        if (!ch)
                return;

        jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n");

        bd = ch->ch_bd;
        if (!bd)
                return;

        if (ch->ch_mistat & UART_MSR_DCD) {
                jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n",
                        ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
                phys_carrier = 1;
        }

        if (ch->ch_c_cflag & CLOCAL)
                virt_carrier = 1;

        jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n",
                phys_carrier, virt_carrier);

        /*
         * Test for a VIRTUAL carrier transition to HIGH.
         */
        if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

                /*
                 * When carrier rises, wake any threads waiting
                 * for carrier in the open routine.
                 */
                jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n");

                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Test for a PHYSICAL carrier transition to HIGH.
         */
        if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

                /*
                 * When carrier rises, wake any threads waiting
                 * for carrier in the open routine.
                 */
                jsm_dbg(CARR, &ch->ch_bd->pci_dev,
                        "carrier: physical DCD rose\n");

                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Test for a PHYSICAL transition to low, so long as we aren't
         * currently ignoring physical transitions (which is what "virtual
         * carrier" indicates).
         *
         * The transition of the virtual carrier to low really doesn't
         * matter... it really only means "ignore carrier state", not
         * "make pretend that carrier is there".
         */
        if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
            (phys_carrier == 0)) {
                /*
                 * When carrier drops:
                 *
                 * Drop carrier on all open units.
                 *
                 * Flush queues, waking up any task waiting in the
                 * line discipline.
                 *
                 * Send a hangup to the control terminal.
                 *
                 * Enable all select calls.
                 */
                if (waitqueue_active(&(ch->ch_flags_wait)))
                        wake_up_interruptible(&ch->ch_flags_wait);
        }

        /*
         * Make sure that our cached values reflect the current reality.
         */
        if (virt_carrier == 1)
                ch->ch_flags |= CH_FCAR;
        else
                ch->ch_flags &= ~CH_FCAR;

        if (phys_carrier == 1)
                ch->ch_flags |= CH_CD;
        else
                ch->ch_flags &= ~CH_CD;
}
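/*
 * Flow-control bookkeeping below: qleft is the free space left in the receive
 * ring, computed as (tail - head - 1) modulo the queue size, which reserves
 * one slot so that head == tail always means "empty" (an empty queue reports
 * RQUEUESIZE - 1 free bytes, assuming RQUEUEMASK == RQUEUESIZE - 1).
 * Throttling kicks in when fewer than 256 bytes remain and is released once
 * more than half of the queue is free again.
 */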
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
        struct board_ops *bd_ops = ch->ch_bd->bd_ops;
        int qleft;

        /* Store how much space we have left in the queue */
        qleft = ch->ch_r_tail - ch->ch_r_head - 1;
        if (qleft < 0)
                qleft += RQUEUEMASK + 1;

        /*
         * Check to see if we should enforce flow control on our queue because
         * the ld (or user) isn't reading data out of our queue fast enough.
         *
         * NOTE: This is done based on what the current flow control of the
         * port is set for.
         *
         * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
         *      This will cause the UART's FIFO to back up, and force
         *      the RTS signal to be dropped.
         * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
         *      the other side, in hopes it will stop sending data to us.
         * 3) NONE - Nothing we can do.  We will simply drop any extra data
         *      that gets sent into us when the queue fills up.
         */
        if (qleft < 256) {
                /* HWFLOW */
                if (ch->ch_c_cflag & CRTSCTS) {
                        if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
                                bd_ops->disable_receiver(ch);
                                ch->ch_flags |= (CH_RECEIVER_OFF);
                                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                                        "Internal queue hit hilevel mark (%d)! Turning off interrupts\n",
                                        qleft);
                        }
                }
                /* SWFLOW */
                else if (ch->ch_c_iflag & IXOFF) {
                        if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
                                bd_ops->send_stop_character(ch);
                                ch->ch_stops_sent++;
                                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                                        "Sending stop char! Times sent: %x\n",
                                        ch->ch_stops_sent);
                        }
                }
        }

        /*
         * Check to see if we should relax flow control because the
         * ld (or user) finally read enough data out of our queue.
         *
         * NOTE: This is done based on what the current flow control of the
         * port is set for.
         *
         * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
         *      This lets the UART's FIFO drain, which raises RTS back up
         *      and allows the other side to start sending data again.
         * 2) SWFLOW (IXOFF) - Send a start character to
         *      the other side, so it will start sending data to us again.
         * 3) NONE - Do nothing.  Since we didn't do anything to turn off the
         *      other side, we don't need to do anything now.
         */
        if (qleft > (RQUEUESIZE / 2)) {
                /* HWFLOW */
                if (ch->ch_c_cflag & CRTSCTS) {
                        if (ch->ch_flags & CH_RECEIVER_OFF) {
                                bd_ops->enable_receiver(ch);
                                ch->ch_flags &= ~(CH_RECEIVER_OFF);
                                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                                        "Internal queue hit lowlevel mark (%d)! Turning on interrupts\n",
                                        qleft);
                        }
                }
                /* SWFLOW */
                else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
                        ch->ch_stops_sent = 0;
                        bd_ops->send_start_character(ch);
                        jsm_dbg(READ, &ch->ch_bd->pci_dev, "Sending start char!\n");
                }
        }
}