// SPDX-License-Identifier: GPL-2.0+
/************************************************************************
 * Copyright 2003 Digi International (www.digi.com)
 *
 * Copyright (C) 2004 IBM Corporation. All rights reserved.
 *
 * Contact Information:
 * Scott H Kilau <Scott_Kilau@digi.com>
 * Ananda Venkatarman <mansarov@us.ibm.com>
 * Modifications:
 * 01/19/06:	changed jsm_input routine to use the dynamically allocated
 *		tty_buffer changes. Contributors: Scott Kilau and Ananda V.
 ***********************************************************************/
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/delay.h>	/* For udelay */
#include <linux/pci.h>
#include <linux/slab.h>

#include "jsm.h"

static DECLARE_BITMAP(linemap, MAXLINES);

static void jsm_carrier(struct jsm_channel *ch);

static inline int jsm_get_mstat(struct jsm_channel *ch)
{
	unsigned char mstat;
	int result;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "start\n");

	mstat = (ch->ch_mostat | ch->ch_mistat);

	result = 0;

	if (mstat & UART_MCR_DTR)
		result |= TIOCM_DTR;
	if (mstat & UART_MCR_RTS)
		result |= TIOCM_RTS;
	if (mstat & UART_MSR_CTS)
		result |= TIOCM_CTS;
	if (mstat & UART_MSR_DSR)
		result |= TIOCM_DSR;
	if (mstat & UART_MSR_RI)
		result |= TIOCM_RI;
	if (mstat & UART_MSR_DCD)
		result |= TIOCM_CD;

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
	return result;
}

static unsigned int jsm_tty_tx_empty(struct uart_port *port)
{
	return TIOCSER_TEMT;
}

/*
 * Return modem signals to ld.
 */
static unsigned int jsm_tty_get_mctrl(struct uart_port *port)
{
	int result;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	result = jsm_get_mstat(channel);

	if (result < 0)
		return -ENXIO;

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");

	return result;
}

/*
 * jsm_set_modem_info()
 *
 * Set modem signals, called by ld.
 */
static void jsm_tty_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	if (mctrl & TIOCM_RTS)
		channel->ch_mostat |= UART_MCR_RTS;
	else
		channel->ch_mostat &= ~UART_MCR_RTS;

	if (mctrl & TIOCM_DTR)
		channel->ch_mostat |= UART_MCR_DTR;
	else
		channel->ch_mostat &= ~UART_MCR_DTR;

	channel->ch_bd->bd_ops->assert_modem_signals(channel);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
	udelay(10);
}

/*
 * jsm_tty_write()
 *
 * Take data from the user or kernel and send it out to the FEP.
 * In here exists all the Transparent Print magic as well.
 */
static void jsm_tty_write(struct uart_port *port)
{
	struct jsm_channel *channel;

	channel = container_of(port, struct jsm_channel, uart_port);
	channel->ch_bd->bd_ops->copy_data_from_queue_to_uart(channel);
}

static void jsm_tty_start_tx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags &= ~(CH_STOP);
	jsm_tty_write(port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_stop_tx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "start\n");

	channel->ch_flags |= (CH_STOP);

	jsm_dbg(IOCTL, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_send_xchar(struct uart_port *port, char ch)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);
	struct ktermios *termios;

	spin_lock_irqsave(&port->lock, lock_flags);
	termios = &port->state->port.tty->termios;
	if (ch == termios->c_cc[VSTART])
		channel->ch_bd->bd_ops->send_start_character(channel);

	if (ch == termios->c_cc[VSTOP])
		channel->ch_bd->bd_ops->send_stop_character(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static void jsm_tty_stop_rx(struct uart_port *port)
{
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	channel->ch_bd->bd_ops->disable_receiver(channel);
}

static void jsm_tty_break(struct uart_port *port, int break_state)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	spin_lock_irqsave(&port->lock, lock_flags);
	if (break_state == -1)
		channel->ch_bd->bd_ops->send_break(channel);
	else
		channel->ch_bd->bd_ops->clear_break(channel);

	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static int jsm_tty_open(struct uart_port *port)
{
	struct jsm_board *brd;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);
	struct ktermios *termios;

	/* Get board pointer from our array of majors we have allocated */
	brd = channel->ch_bd;

	/*
	 * Allocate channel buffers for read/write/error.
	 * Set flag, so we don't get trounced on.
	 */
	channel->ch_flags |= (CH_OPENING);

	/* Drop locks, as malloc with GFP_KERNEL can sleep */

	if (!channel->ch_rqueue) {
		channel->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_rqueue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate read queue buf\n");
			return -ENOMEM;
		}
	}
	if (!channel->ch_equeue) {
		channel->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
		if (!channel->ch_equeue) {
			jsm_dbg(INIT, &channel->ch_bd->pci_dev,
				"unable to allocate error queue buf\n");
			return -ENOMEM;
		}
	}

	channel->ch_flags &= ~(CH_OPENING);
	/*
	 * Initialize if neither terminal is open.
	 */
	jsm_dbg(OPEN, &channel->ch_bd->pci_dev,
		"jsm_open: initializing channel in open...\n");

	/*
	 * Flush input queues.
	 */
	channel->ch_r_head = channel->ch_r_tail = 0;
	channel->ch_e_head = channel->ch_e_tail = 0;

	brd->bd_ops->flush_uart_write(channel);
	brd->bd_ops->flush_uart_read(channel);

	channel->ch_flags = 0;
	channel->ch_cached_lsr = 0;
	channel->ch_stops_sent = 0;

	termios = &port->state->port.tty->termios;
	channel->ch_c_cflag = termios->c_cflag;
	channel->ch_c_iflag = termios->c_iflag;
	channel->ch_c_oflag = termios->c_oflag;
	channel->ch_c_lflag = termios->c_lflag;
	channel->ch_startc = termios->c_cc[VSTART];
	channel->ch_stopc = termios->c_cc[VSTOP];

	/* Tell UART to init itself */
	brd->bd_ops->uart_init(channel);

	/*
	 * Run param in case we changed anything
	 */
	brd->bd_ops->param(channel);

	jsm_carrier(channel);

	channel->ch_open_count++;

	jsm_dbg(OPEN, &channel->ch_bd->pci_dev, "finish\n");
	return 0;
}

static void jsm_tty_close(struct uart_port *port)
{
	struct jsm_board *bd;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "start\n");

	bd = channel->ch_bd;

	channel->ch_flags &= ~(CH_STOPI);

	channel->ch_open_count--;

	/*
	 * If we have HUPCL set, lower DTR and RTS
	 */
	if (channel->ch_c_cflag & HUPCL) {
		jsm_dbg(CLOSE, &channel->ch_bd->pci_dev,
			"Close. HUPCL set, dropping DTR/RTS\n");

		/* Drop RTS/DTR */
		channel->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
		bd->bd_ops->assert_modem_signals(channel);
	}

	/* Turn off UART interrupts for this port */
	channel->ch_bd->bd_ops->uart_off(channel);

	jsm_dbg(CLOSE, &channel->ch_bd->pci_dev, "finish\n");
}

static void jsm_tty_set_termios(struct uart_port *port,
				struct ktermios *termios,
				struct ktermios *old_termios)
{
	unsigned long lock_flags;
	struct jsm_channel *channel =
		container_of(port, struct jsm_channel, uart_port);

	spin_lock_irqsave(&port->lock, lock_flags);
	channel->ch_c_cflag = termios->c_cflag;
	channel->ch_c_iflag = termios->c_iflag;
	channel->ch_c_oflag = termios->c_oflag;
	channel->ch_c_lflag = termios->c_lflag;
	channel->ch_startc = termios->c_cc[VSTART];
	channel->ch_stopc = termios->c_cc[VSTOP];

	channel->ch_bd->bd_ops->param(channel);
	jsm_carrier(channel);
	spin_unlock_irqrestore(&port->lock, lock_flags);
}

static const char *jsm_tty_type(struct uart_port *port)
{
	return "jsm";
}

static void jsm_tty_release_port(struct uart_port *port)
{
}

static int jsm_tty_request_port(struct uart_port *port)
{
	return 0;
}

static void jsm_config_port(struct uart_port *port, int flags)
{
	port->type = PORT_JSM;
}

static const struct uart_ops jsm_ops = {
	.tx_empty	= jsm_tty_tx_empty,
	.set_mctrl	= jsm_tty_set_mctrl,
	.get_mctrl	= jsm_tty_get_mctrl,
	.stop_tx	= jsm_tty_stop_tx,
	.start_tx	= jsm_tty_start_tx,
	.send_xchar	= jsm_tty_send_xchar,
	.stop_rx	= jsm_tty_stop_rx,
	.break_ctl	= jsm_tty_break,
	.startup	= jsm_tty_open,
	.shutdown	= jsm_tty_close,
	.set_termios	= jsm_tty_set_termios,
	.type		= jsm_tty_type,
	.release_port	= jsm_tty_release_port,
	.request_port	= jsm_tty_request_port,
	.config_port	= jsm_config_port,
};

/*
 * jsm_tty_init()
 *
 * Init the tty subsystem. Called once per board after board has been
 * downloaded and init'ed.
 */
int jsm_tty_init(struct jsm_board *brd)
{
	int i;
	void __iomem *vaddr;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/*
	 * Allocate channel memory that might not have been allocated
	 * when the driver was first loaded.
	 */
	for (i = 0; i < brd->nasync; i++) {
		if (!brd->channels[i]) {

			/*
			 * Okay to malloc with GFP_KERNEL, we are not at
			 * interrupt context, and there are no locks held.
			 */
			brd->channels[i] = kzalloc(sizeof(struct jsm_channel), GFP_KERNEL);
			if (!brd->channels[i]) {
				jsm_dbg(CORE, &brd->pci_dev,
					"%s:%d Unable to allocate memory for channel struct\n",
					__FILE__, __LINE__);
			}
		}
	}

	ch = brd->channels[0];
	vaddr = brd->re_map_membase;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		spin_lock_init(&ch->ch_lock);

		if (brd->bd_uart_offset == 0x200)
			ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
		else
			ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);

		ch->ch_bd = brd;
		ch->ch_portnum = i;

		/* .25 second delay */
		ch->ch_close_delay = 250;

		init_waitqueue_head(&ch->ch_flags_wait);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

/*
 * jsm_uart_port_init()
 *
 * Register each channel's uart_port with the serial core, assigning
 * the first free line from the line map.
 */
int jsm_uart_port_init(struct jsm_board *brd)
{
	int i, rc;
	unsigned int line;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {

		if (!brd->channels[i])
			continue;

		brd->channels[i]->uart_port.irq = brd->irq;
		brd->channels[i]->uart_port.uartclk = 14745600;
		brd->channels[i]->uart_port.type = PORT_JSM;
		brd->channels[i]->uart_port.iotype = UPIO_MEM;
		brd->channels[i]->uart_port.membase = brd->re_map_membase;
		brd->channels[i]->uart_port.fifosize = 16;
		brd->channels[i]->uart_port.ops = &jsm_ops;
		line = find_first_zero_bit(linemap, MAXLINES);
		if (line >= MAXLINES) {
			printk(KERN_INFO "jsm: linemap is full, added device failed\n");
			continue;
		} else
			set_bit(line, linemap);
		brd->channels[i]->uart_port.line = line;
		rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
		if (rc) {
			printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
			return rc;
		} else
			printk(KERN_INFO "jsm: Port %d added\n", i);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

/*
 * jsm_remove_uart_port()
 *
 * Unregister each channel's uart_port and release its line in the
 * line map.
 */
int jsm_remove_uart_port(struct jsm_board *brd)
{
	int i;
	struct jsm_channel *ch;

	if (!brd)
		return -ENXIO;

	jsm_dbg(INIT, &brd->pci_dev, "start\n");

	/*
	 * Initialize board structure elements.
	 */

	brd->nasync = brd->maxports;

	/* Set up channel variables */
	for (i = 0; i < brd->nasync; i++) {

		if (!brd->channels[i])
			continue;

		ch = brd->channels[i];

		clear_bit(ch->uart_port.line, linemap);
		uart_remove_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
	}

	jsm_dbg(INIT, &brd->pci_dev, "finish\n");
	return 0;
}

/*
 * jsm_input()
 *
 * Move received data from the channel's read queue into the tty flip
 * buffer, honoring CREAD, throttling and per-character error flags
 * (break, parity, framing).
 */
void jsm_input(struct jsm_channel *ch)
{
	struct jsm_board *bd;
	struct tty_struct *tp;
	struct tty_port *port;
	u32 rmask;
	u16 head;
	u16 tail;
	int data_len;
	unsigned long lock_flags;
	int len = 0;
	int s = 0;
	int i = 0;

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	port = &ch->uart_port.state->port;
	tp = port->tty;

	bd = ch->ch_bd;
	if (!bd)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Figure the number of characters in the buffer.
	 * Exit immediately if none.
	 */

	rmask = RQUEUEMASK;

	head = ch->ch_r_head & rmask;
	tail = ch->ch_r_tail & rmask;

	data_len = (head - tail) & rmask;
	if (data_len == 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start\n");

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if (!tp || !C_CREAD(tp)) {

		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"input. dropping %d bytes on port %d...\n",
			data_len, ch->ch_portnum);
		ch->ch_r_head = tail;

		/* Force queue flow control to be released, if needed */
		jsm_check_queue_flow_control(ch);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_STOPI) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		jsm_dbg(READ, &ch->ch_bd->pci_dev,
			"Port %d throttled, not reading any data. head: %x tail: %x\n",
			ch->ch_portnum, head, tail);
		return;
	}

	jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");

	len = tty_buffer_request_room(port, data_len);

	/*
	 * len now contains the most amount of data we can copy,
	 * bounded either by the flip buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (len) {
		s = ((head >= tail) ? head : RQUEUESIZE) - tail;
		s = min(s, len);

		if (s <= 0)
			break;

		/*
		 * If conditions are such that ld needs to see all
		 * UART errors, we will have to walk each character
		 * and error byte and send them to the buffer one at
		 * a time.
		 */

		if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
			for (i = 0; i < s; i++) {
				/*
				 * Give the Linux ld the flags in the
				 * format it likes.
				 */
				if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
				else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
				else
					tty_insert_flip_char(port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
			}
		} else {
			tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
		}
		tail += s;
		len -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	ch->ch_r_tail = tail & rmask;
	ch->ch_e_tail = tail & rmask;
	jsm_check_queue_flow_control(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	/* Tell the tty layer it's okay to "eat" the data now */
	tty_flip_buffer_push(port);

	jsm_dbg(IOCTL, &ch->ch_bd->pci_dev, "finish\n");
}

/*
 * jsm_carrier()
 *
 * Recompute the virtual (CLOCAL) and physical (DCD) carrier state,
 * wake up anyone waiting on a carrier transition, and update the
 * cached CH_FCAR/CH_CD flags.
 */
static void jsm_carrier(struct jsm_channel *ch)
{
	struct jsm_board *bd;

	int virt_carrier = 0;
	int phys_carrier = 0;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "start\n");

	bd = ch->ch_bd;
	if (!bd)
		return;

	if (ch->ch_mistat & UART_MSR_DCD) {
		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "mistat: %x D_CD: %x\n",
			ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD);
		phys_carrier = 1;
	}

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	jsm_dbg(CARR, &ch->ch_bd->pci_dev, "DCD: physical: %d virt: %d\n",
		phys_carrier, virt_carrier);

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_dbg(CARR, &ch->ch_bd->pci_dev, "carrier: virt DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {

		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */

		jsm_dbg(CARR, &ch->ch_bd->pci_dev,
			"carrier: physical DCD rose\n");

		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0)
	    && (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}


/*
 * jsm_check_queue_flow_control()
 *
 * Throttle or unthrottle the receive path (hardware or software flow
 * control) based on how full the channel's read queue is.
 */
void jsm_check_queue_flow_control(struct jsm_channel *ch)
{
	struct board_ops *bd_ops = ch->ch_bd->bd_ops;
	int qleft;

	/* Store how much space we have left in the queue */
	qleft = ch->ch_r_tail - ch->ch_r_head - 1;
	if (qleft < 0)
		qleft += RQUEUEMASK + 1;

	/*
	 * Check to see if we should enforce flow control on our queue because
	 * the ld (or user) isn't reading data out of our queue fast enough.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to back up, and force
	 *	the RTS signal to be dropped.
	 * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
	 *	the other side, in hopes it will stop sending data to us.
	 * 3) NONE - Nothing we can do. We will simply drop any extra data
	 *	that gets sent into us when the queue fills up.
	 */
	if (qleft < 256) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (!(ch->ch_flags & CH_RECEIVER_OFF)) {
				bd_ops->disable_receiver(ch);
				ch->ch_flags |= (CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit hilevel mark (%d)! Turning off interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF) {
			if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
				bd_ops->send_stop_character(ch);
				ch->ch_stops_sent++;
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Sending stop char! Times sent: %x\n",
					ch->ch_stops_sent);
			}
		}
	}

	/*
	 * Check to see if we should stop enforcing flow control because the
	 * ld (or user) finally read enough data out of our queue.
	 *
	 * NOTE: This is done based on what the current flow control of the
	 * port is set for.
	 *
	 * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
	 *	This will cause the UART's FIFO to raise RTS back up,
	 *	which will allow the other side to start sending data again.
	 * 2) SWFLOW (IXOFF) - Send a start character to
	 *	the other side, so it will start sending data to us again.
	 * 3) NONE - Do nothing. Since we didn't do anything to turn off the
	 *	other side, we don't need to do anything now.
	 */
	if (qleft > (RQUEUESIZE / 2)) {
		/* HWFLOW */
		if (ch->ch_c_cflag & CRTSCTS) {
			if (ch->ch_flags & CH_RECEIVER_OFF) {
				bd_ops->enable_receiver(ch);
				ch->ch_flags &= ~(CH_RECEIVER_OFF);
				jsm_dbg(READ, &ch->ch_bd->pci_dev,
					"Internal queue hit lowlevel mark (%d)! Turning on interrupts\n",
					qleft);
			}
		}
		/* SWFLOW */
		else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
			ch->ch_stops_sent = 0;
			bd_ops->send_start_character(ch);
			jsm_dbg(READ, &ch->ch_bd->pci_dev,
				"Sending start char!\n");
		}
	}
}