// SPDX-License-Identifier: GPL-1.0+
/* generic HDLC line discipline for Linux
 *
 * Written by Paul Fulghum paulkf@microgate.com
 * for Microgate Corporation
 *
 * Microgate and SyncLink are registered trademarks of Microgate Corporation
 *
 * Adapted from ppp.c, written by Michael Callahan <callahan@maths.ox.ac.uk>,
 *	Al Longyear <longyear@netcom.com>,
 *	Paul Mackerras <Paul.Mackerras@cs.anu.edu.au>
 *
 * Original release 01/11/99
 *
 * This module implements the tty line discipline N_HDLC for use with
 * tty device drivers that support bit-synchronous HDLC communications.
 *
 * All HDLC data is frame oriented which means:
 *
 * 1. tty write calls represent one complete transmit frame of data.
 *    The device driver should accept the complete frame or none of
 *    the frame (busy) in the write method. Each write call should have
 *    a byte count in the range of 2-65535 bytes (2 is min HDLC frame
 *    with 1 addr byte and 1 ctrl byte). The max byte count of 65535
 *    should include any crc bytes required. For example, when using
 *    CCITT CRC32, 4 crc bytes are required, so the maximum size frame
 *    the application may transmit is limited to 65531 bytes. For CCITT
 *    CRC16, the maximum application frame size would be 65533.
 *
 * 2. receive callbacks from the device driver represent
 *    one received frame. The device driver should bypass
 *    the tty flip buffer and call the line discipline receive
 *    callback directly to avoid fragmenting or concatenating
 *    multiple frames into a single receive callback.
 *
 *    The HDLC line discipline queues the receive frames in separate
 *    buffers so complete receive frames can be returned by the
 *    tty read calls.
 *
 * 3. tty read calls return an entire frame of data or nothing.
 *
 * 4. all send and receive data is considered raw. No processing
 *    or translation is performed by the line discipline, regardless
 *    of the tty flags.
 *
 * 5. When the line discipline is queried for the amount of receive
 *    data available (FIONREAD), 0 is returned if no data is available;
 *    otherwise the count of the next available frame is returned
 *    (instead of the sum of all received frame counts).
 *
 * These conventions allow the standard tty programming interface
 * to be used for synchronous HDLC applications when used with
 * this line discipline (or another line discipline that is frame
 * oriented such as N_PPP).
 *
 * The SyncLink driver (synclink.c) implements both asynchronous
 * (using standard line discipline N_TTY) and synchronous HDLC
 * (using N_HDLC) communications, with the latter using the above
 * conventions.
 *
 * This implementation is very basic and does not maintain
 * any statistics. The main point is to enforce the raw data
 * and frame orientation of HDLC communications.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */
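
/*
 * Example (userspace sketch, not part of this module): the frame
 * conventions above mean an application drives N_HDLC through the
 * ordinary tty system calls.  A minimal sequence, assuming an already
 * configured synchronous port (the device name is only illustrative),
 * might look like:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/dev/ttySL0", O_RDWR);	// e.g. a SyncLink port
 *	int ldisc = N_HDLC;			// from <linux/tty.h>
 *
 *	ioctl(fd, TIOCSETD, &ldisc);		// attach this line discipline
 *	write(fd, txbuf, txlen);		// one call == one complete frame
 *	int rxlen = read(fd, rxbuf, sizeof(rxbuf)); // returns one whole frame
 */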

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>

#include <linux/poll.h>
#include <linux/in.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/errno.h>
#include <linux/string.h>	/* used in new tty drivers */
#include <linux/signal.h>	/* used in new tty drivers */
#include <linux/if.h>
#include <linux/bitops.h>

#include <linux/uaccess.h>
#include "tty.h"

/*
 * Buffers for individual HDLC frames
 */
#define MAX_HDLC_FRAME_SIZE	65535
#define DEFAULT_RX_BUF_COUNT	10
#define MAX_RX_BUF_COUNT	60
#define DEFAULT_TX_BUF_COUNT	3

struct n_hdlc_buf {
	struct list_head list_item;
	int		 count;
	char		 buf[];
};

struct n_hdlc_buf_list {
	struct list_head list;
	int		 count;
	spinlock_t	 spinlock;
};

/**
 * struct n_hdlc - per device instance data structure
 * @tbusy: reentrancy flag for tx wakeup code
 * @woke_up: tx wakeup needs to be run again as it was called while @tbusy
 * @tx_buf_list: list of pending transmit frame buffers
 * @rx_buf_list: list of received frame buffers
 * @tx_free_buf_list: list of unused transmit frame buffers
 * @rx_free_buf_list: list of unused received frame buffers
 * @write_work: deferred work used to resume sending from the wakeup callback
 * @tty_for_write_work: tty that @write_work operates on
 */
struct n_hdlc {
	bool			tbusy;
	bool			woke_up;
	struct n_hdlc_buf_list	tx_buf_list;
	struct n_hdlc_buf_list	rx_buf_list;
	struct n_hdlc_buf_list	tx_free_buf_list;
	struct n_hdlc_buf_list	rx_free_buf_list;
	struct work_struct	write_work;
	struct tty_struct	*tty_for_write_work;
};

/*
 * HDLC buffer list manipulation functions
 */
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
			      struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
			   struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);

/* Local functions */

static struct n_hdlc *n_hdlc_alloc(void);
static void n_hdlc_tty_write_work(struct work_struct *work);

/* max frame size for memory allocations */
static int maxframe = 4096;

static void flush_rx_queue(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	struct n_hdlc_buf *buf;

	while ((buf = n_hdlc_buf_get(&n_hdlc->rx_buf_list)))
		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, buf);
}

static void flush_tx_queue(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	struct n_hdlc_buf *buf;

	while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
}

static void n_hdlc_free_buf_list(struct n_hdlc_buf_list *list)
{
	struct n_hdlc_buf *buf;

	do {
		buf = n_hdlc_buf_get(list);
		kfree(buf);
	} while (buf);
}

/**
 * n_hdlc_tty_close - line discipline close
 * @tty: pointer to tty info structure
 *
 * Called when the line discipline is changed to something
 * else, the tty is closed, or the tty detects a hangup.
 */
static void n_hdlc_tty_close(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;

#if defined(TTY_NO_WRITE_SPLIT)
	clear_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
#endif
	tty->disc_data = NULL;

	/* Ensure that readers/writers blocked in select()/poll() are woken */
	wake_up_interruptible(&tty->read_wait);
	wake_up_interruptible(&tty->write_wait);

	cancel_work_sync(&n_hdlc->write_work);

	n_hdlc_free_buf_list(&n_hdlc->rx_free_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->tx_free_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->rx_buf_list);
	n_hdlc_free_buf_list(&n_hdlc->tx_buf_list);
	kfree(n_hdlc);
}	/* end of n_hdlc_tty_close() */

/**
 * n_hdlc_tty_open - called when line discipline changed to n_hdlc
 * @tty: pointer to tty info structure
 *
 * Returns 0 on success, otherwise an error code.
 */
static int n_hdlc_tty_open(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;

	pr_debug("%s() called (device=%s)\n", __func__, tty->name);

	/* There should not be an existing table for this slot. */
	if (n_hdlc) {
		pr_err("%s: tty already associated!\n", __func__);
		return -EEXIST;
	}

	n_hdlc = n_hdlc_alloc();
	if (!n_hdlc) {
		pr_err("%s: n_hdlc_alloc failed\n", __func__);
		return -ENFILE;
	}

	INIT_WORK(&n_hdlc->write_work, n_hdlc_tty_write_work);
	n_hdlc->tty_for_write_work = tty;
	tty->disc_data = n_hdlc;
	tty->receive_room = 65536;

	/* change tty_io write() to not split large writes into 8K chunks */
	set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);

	/* flush receive data from driver */
	tty_driver_flush_buffer(tty);

	return 0;

}	/* end of n_hdlc_tty_open() */

/**
 * n_hdlc_send_frames - send frames on pending send buffer list
 * @n_hdlc: pointer to ldisc instance data
 * @tty: pointer to tty instance data
 *
 * Send frames on the pending send buffer list until the driver does not
 * accept a frame (busy).  This function is called after adding a frame to
 * the send buffer list and by the tty wakeup callback.
 */
static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
{
	int actual;
	unsigned long flags;
	struct n_hdlc_buf *tbuf;

check_again:

	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	if (n_hdlc->tbusy) {
		n_hdlc->woke_up = true;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
		return;
	}
	n_hdlc->tbusy = true;
	n_hdlc->woke_up = false;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
	while (tbuf) {
		pr_debug("sending frame %p, count=%d\n", tbuf, tbuf->count);

		/* Send the next block of data to device */
		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
		actual = tty->ops->write(tty, tbuf->buf, tbuf->count);

		/* rollback was possible and has been done */
		if (actual == -ERESTARTSYS) {
			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
			break;
		}
		/* if transmit error, throw frame away by */
		/* pretending it was accepted by driver */
		if (actual < 0)
			actual = tbuf->count;

		if (actual == tbuf->count) {
			pr_debug("frame %p completed\n", tbuf);

			/* free current transmit buffer */
			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);

			/* wake up sleeping writers */
			wake_up_interruptible(&tty->write_wait);

			/* get next pending transmit buffer */
			tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
		} else {
			pr_debug("frame %p pending\n", tbuf);

			/*
			 * the buffer was not accepted by driver,
			 * return it back into tx queue
			 */
			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
			break;
		}
	}

	if (!tbuf)
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);

	/* Clear the re-entry flag */
	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
	n_hdlc->tbusy = false;
	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);

	if (n_hdlc->woke_up)
		goto check_again;
}	/* end of n_hdlc_send_frames() */

/**
 * n_hdlc_tty_write_work - Asynchronous callback for transmit wakeup
 * @work: pointer to work_struct
 *
 * Called when the low level device driver can accept more send data.
 */
static void n_hdlc_tty_write_work(struct work_struct *work)
{
	struct n_hdlc *n_hdlc = container_of(work, struct n_hdlc, write_work);
	struct tty_struct *tty = n_hdlc->tty_for_write_work;

	n_hdlc_send_frames(n_hdlc, tty);
}	/* end of n_hdlc_tty_write_work() */

/**
 * n_hdlc_tty_wakeup - Callback for transmit wakeup
 * @tty: pointer to associated tty instance data
 *
 * Called when the low level device driver can accept more send data.
 */
static void n_hdlc_tty_wakeup(struct tty_struct *tty)
{
	struct n_hdlc *n_hdlc = tty->disc_data;

	schedule_work(&n_hdlc->write_work);
}	/* end of n_hdlc_tty_wakeup() */

/**
 * n_hdlc_tty_receive - Called by tty driver when receive data is available
 * @tty: pointer to tty instance data
 * @data: pointer to received data
 * @flags: pointer to flags for data
 * @count: count of received data in bytes
 *
 * Called by tty low level driver when receive data is available.  Data is
 * interpreted as one HDLC frame.
 */
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
			       const char *flags, int count)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	struct n_hdlc_buf *buf;

	pr_debug("%s() called count=%d\n", __func__, count);

	if (count > maxframe) {
		pr_debug("rx count>maxframesize, data discarded\n");
		return;
	}

	/* get a free HDLC buffer */
	buf = n_hdlc_buf_get(&n_hdlc->rx_free_buf_list);
	if (!buf) {
		/*
		 * no buffers in free list, attempt to allocate another rx
		 * buffer unless the maximum count has been reached
		 */
		if (n_hdlc->rx_buf_list.count < MAX_RX_BUF_COUNT)
			buf = kmalloc(struct_size(buf, buf, maxframe),
				      GFP_ATOMIC);
	}

	if (!buf) {
		pr_debug("no more rx buffers, data discarded\n");
		return;
	}

	/* copy received data to HDLC buffer */
	memcpy(buf->buf, data, count);
	buf->count = count;

	/* add HDLC buffer to list of received frames */
	n_hdlc_buf_put(&n_hdlc->rx_buf_list, buf);

	/* wake up any blocked reads and perform async signalling */
	wake_up_interruptible(&tty->read_wait);
	if (tty->fasync != NULL)
		kill_fasync(&tty->fasync, SIGIO, POLL_IN);

}	/* end of n_hdlc_tty_receive() */

/**
 * n_hdlc_tty_read - Called to retrieve one frame of data (if available)
 * @tty: pointer to tty instance data
 * @file: pointer to open file object
 * @kbuf: pointer to returned data buffer
 * @nr: size of returned data buffer
 * @cookie: stored rbuf from previous run
 * @offset: offset into the data buffer
 *
 * Returns the number of bytes returned or an error code.
 */
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
			       __u8 *kbuf, size_t nr,
			       void **cookie, unsigned long offset)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int ret = 0;
	struct n_hdlc_buf *rbuf;
	DECLARE_WAITQUEUE(wait, current);

	/* Is this a repeated call for an rbuf we already found earlier? */
	rbuf = *cookie;
	if (rbuf)
		goto have_rbuf;

	add_wait_queue(&tty->read_wait, &wait);

	for (;;) {
		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
			ret = -EIO;
			break;
		}
		if (tty_hung_up_p(file))
			break;

		set_current_state(TASK_INTERRUPTIBLE);

		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
		if (rbuf)
			break;

		/* no data */
		if (tty_io_nonblock(tty, file)) {
			ret = -EAGAIN;
			break;
		}

		schedule();

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}

	remove_wait_queue(&tty->read_wait, &wait);
	__set_current_state(TASK_RUNNING);

	if (!rbuf)
		return ret;
	*cookie = rbuf;

have_rbuf:
	/* Have we used it up entirely? */
	if (offset >= rbuf->count)
		goto done_with_rbuf;

	/* More data to go, but can't copy any more? EOVERFLOW */
	ret = -EOVERFLOW;
	if (!nr)
		goto done_with_rbuf;

	/* Copy as much data as possible */
	ret = rbuf->count - offset;
	if (ret > nr)
		ret = nr;
	memcpy(kbuf, rbuf->buf + offset, ret);
	offset += ret;

	/* If we still have data left, we leave the rbuf in the cookie */
	if (offset < rbuf->count)
		return ret;

done_with_rbuf:
	*cookie = NULL;

	if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
		kfree(rbuf);
	else
		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);

	return ret;

}	/* end of n_hdlc_tty_read() */

/**
 * n_hdlc_tty_write - write a single frame of data to device
 * @tty: pointer to associated tty device instance data
 * @file: pointer to file object data
 * @data: pointer to transmit data (one frame)
 * @count: size of transmit frame in bytes
 *
 * Returns the number of bytes written (or an error code).
 */
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
				const unsigned char *data, size_t count)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int error = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct n_hdlc_buf *tbuf;

	pr_debug("%s() called count=%zu\n", __func__, count);

	/* verify frame size */
	if (count > maxframe) {
		pr_debug("%s: truncating user packet from %zu to %d\n",
			 __func__, count, maxframe);
		count = maxframe;
	}

	add_wait_queue(&tty->write_wait, &wait);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
		if (tbuf)
			break;

		if (tty_io_nonblock(tty, file)) {
			error = -EAGAIN;
			break;
		}
		schedule();

		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tty->write_wait, &wait);

	if (!error) {
		/* Copy the frame data into the transmit buffer */
		memcpy(tbuf->buf, data, count);

		/* Send the data */
		tbuf->count = error = count;
		n_hdlc_buf_put(&n_hdlc->tx_buf_list, tbuf);
		n_hdlc_send_frames(n_hdlc, tty);
	}

	return error;

}	/* end of n_hdlc_tty_write() */

/**
 * n_hdlc_tty_ioctl - process IOCTL system call for the tty device
 * @tty: pointer to tty instance data
 * @cmd: IOCTL command code
 * @arg: argument for IOCTL call (cmd dependent)
 *
 * Returns a command dependent result.
 */
static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
			    unsigned long arg)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	int error = 0;
	int count;
	unsigned long flags;
	struct n_hdlc_buf *buf = NULL;

	pr_debug("%s() called %u\n", __func__, cmd);

	switch (cmd) {
	case FIONREAD:
		/* report count of read data available */
		/* in next available frame (if any) */
		spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock, flags);
		buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
					       struct n_hdlc_buf, list_item);
		if (buf)
			count = buf->count;
		else
			count = 0;
		spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock, flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TIOCOUTQ:
		/* get the pending tx byte count in the driver */
		count = tty_chars_in_buffer(tty);
		/* add size of next output frame in queue */
		spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
		buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
					       struct n_hdlc_buf, list_item);
		if (buf)
			count += buf->count;
		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
		error = put_user(count, (int __user *)arg);
		break;

	case TCFLSH:
		switch (arg) {
		case TCIOFLUSH:
		case TCOFLUSH:
			flush_tx_queue(tty);
		}
		fallthrough;	/* to default */

	default:
		error = n_tty_ioctl_helper(tty, cmd, arg);
		break;
	}
	return error;

}	/* end of n_hdlc_tty_ioctl() */
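
/*
 * Example (userspace sketch, not part of this module): because FIONREAD
 * above reports the size of only the *next* queued receive frame, an
 * application can size its read for exactly one frame:
 *
 *	int next = 0;
 *	if (ioctl(fd, FIONREAD, &next) == 0 && next > 0)
 *		n = read(fd, rxbuf, next);	// one whole frame
 *
 * Similarly, TIOCOUTQ returns the driver's pending transmit byte count
 * plus the size of the next frame still queued by this line discipline.
 */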

/**
 * n_hdlc_tty_poll - TTY callback for poll system call
 * @tty: pointer to tty instance data
 * @filp: pointer to open file object for device
 * @wait: wait queue for operations
 *
 * Determine which operations (read/write) will not block and return info
 * to caller.
 * Returns a bit mask containing info on which ops will not block.
 */
static __poll_t n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
				poll_table *wait)
{
	struct n_hdlc *n_hdlc = tty->disc_data;
	__poll_t mask = 0;

	/*
	 * queue the current process into any wait queue that may awaken in the
	 * future (read and write)
	 */
	poll_wait(filp, &tty->read_wait, wait);
	poll_wait(filp, &tty->write_wait, wait);

	/* set bits for operations that won't block */
	if (!list_empty(&n_hdlc->rx_buf_list.list))
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable */
	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
		mask |= EPOLLHUP;
	if (tty_hung_up_p(filp))
		mask |= EPOLLHUP;
	if (!tty_is_writelocked(tty) &&
	    !list_empty(&n_hdlc->tx_free_buf_list.list))
		mask |= EPOLLOUT | EPOLLWRNORM;	/* writable */

	return mask;
}	/* end of n_hdlc_tty_poll() */

static void n_hdlc_alloc_buf(struct n_hdlc_buf_list *list, unsigned int count,
			     const char *name)
{
	struct n_hdlc_buf *buf;
	unsigned int i;

	for (i = 0; i < count; i++) {
		buf = kmalloc(struct_size(buf, buf, maxframe), GFP_KERNEL);
		if (!buf) {
			pr_debug("%s(), kmalloc() failed for %s buffer %u\n",
				 __func__, name, i);
			return;
		}
		n_hdlc_buf_put(list, buf);
	}
}

/**
 * n_hdlc_alloc - allocate an n_hdlc instance data structure
 *
 * Returns a pointer to the newly created structure on success, otherwise %NULL.
 */
static struct n_hdlc *n_hdlc_alloc(void)
{
	struct n_hdlc *n_hdlc = kzalloc(sizeof(*n_hdlc), GFP_KERNEL);

	if (!n_hdlc)
		return NULL;

	spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
	spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
	spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
	spin_lock_init(&n_hdlc->tx_buf_list.spinlock);

	INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
	INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
	INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
	INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);

	n_hdlc_alloc_buf(&n_hdlc->rx_free_buf_list, DEFAULT_RX_BUF_COUNT, "rx");
	n_hdlc_alloc_buf(&n_hdlc->tx_free_buf_list, DEFAULT_TX_BUF_COUNT, "tx");

	return n_hdlc;

}	/* end of n_hdlc_alloc() */

/**
 * n_hdlc_buf_return - put the HDLC buffer back at the head of the specified list
 * @buf_list: pointer to the buffer list
 * @buf: pointer to the buffer
 */
static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
			      struct n_hdlc_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&buf_list->spinlock, flags);

	list_add(&buf->list_item, &buf_list->list);
	buf_list->count++;

	spin_unlock_irqrestore(&buf_list->spinlock, flags);
}

/**
 * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
 * @buf_list: pointer to buffer list
 * @buf: pointer to buffer
 */
static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
			   struct n_hdlc_buf *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&buf_list->spinlock, flags);

	list_add_tail(&buf->list_item, &buf_list->list);
	buf_list->count++;

	spin_unlock_irqrestore(&buf_list->spinlock, flags);
}	/* end of n_hdlc_buf_put() */

/**
 * n_hdlc_buf_get - remove and return an HDLC buffer from list
 * @buf_list: pointer to HDLC buffer list
 *
 * Remove and return an HDLC buffer from the head of the specified HDLC buffer
 * list.
 * Returns a pointer to an HDLC buffer if available, otherwise %NULL.
 */
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
	unsigned long flags;
	struct n_hdlc_buf *buf;

	spin_lock_irqsave(&buf_list->spinlock, flags);

	buf = list_first_entry_or_null(&buf_list->list,
				       struct n_hdlc_buf, list_item);
	if (buf) {
		list_del(&buf->list_item);
		buf_list->count--;
	}

	spin_unlock_irqrestore(&buf_list->spinlock, flags);
	return buf;
}	/* end of n_hdlc_buf_get() */

static struct tty_ldisc_ops n_hdlc_ldisc = {
	.owner		= THIS_MODULE,
	.num		= N_HDLC,
	.name		= "hdlc",
	.open		= n_hdlc_tty_open,
	.close		= n_hdlc_tty_close,
	.read		= n_hdlc_tty_read,
	.write		= n_hdlc_tty_write,
	.ioctl		= n_hdlc_tty_ioctl,
	.poll		= n_hdlc_tty_poll,
	.receive_buf	= n_hdlc_tty_receive,
	.write_wakeup	= n_hdlc_tty_wakeup,
	.flush_buffer	= flush_rx_queue,
};

static int __init n_hdlc_init(void)
{
	int status;

	/* range check maxframe arg */
	maxframe = clamp(maxframe, 4096, MAX_HDLC_FRAME_SIZE);

	status = tty_register_ldisc(&n_hdlc_ldisc);
	if (!status)
		pr_info("N_HDLC line discipline registered with maxframe=%d\n",
			maxframe);
	else
		pr_err("N_HDLC: error registering line discipline: %d\n",
		       status);

	return status;

}	/* end of n_hdlc_init() */

static void __exit n_hdlc_exit(void)
{
	tty_unregister_ldisc(&n_hdlc_ldisc);
}

module_init(n_hdlc_init);
module_exit(n_hdlc_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Fulghum paulkf@microgate.com");
module_param(maxframe, int, 0);
MODULE_ALIAS_LDISC(N_HDLC);
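
/*
 * Usage note (sketch, not from the original source): maxframe is a module
 * parameter, so the per-frame buffer size can be raised at load time;
 * n_hdlc_init() clamps it to the 4096..65535 byte range, e.g.:
 *
 *	modprobe n_hdlc maxframe=65535
 */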