/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION     "2.4.2"

#define OBUFSIZE        4096

/* Structure for storing local state. */
struct asyncppp {
        struct tty_struct *tty;
        unsigned int flags;
        unsigned int state;
        unsigned int rbits;
        int mru;
        spinlock_t xmit_lock;
        spinlock_t recv_lock;
        unsigned long xmit_flags;
        u32 xaccm[8];
        u32 raccm;
        unsigned int bytes_sent;
        unsigned int bytes_rcvd;

        struct sk_buff *tpkt;
        int tpkt_pos;
        u16 tfcs;
        unsigned char *optr;
        unsigned char *olim;
        unsigned long last_xmit;

        struct sk_buff *rpkt;
        int lcp_fcs;
        struct sk_buff_head rqueue;

        struct tasklet_struct tsk;

        atomic_t refcnt;
        struct semaphore dead_sem;
        struct ppp_channel chan;        /* interface to generic ppp layer */
        unsigned char obuf[OBUFSIZE];
};

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP     0
#define XMIT_FULL       1
#define XMIT_BUSY       2

/* State bits */
#define SC_TOSS         1
#define SC_ESCAPE       2
#define SC_PREV_ERROR   4

/* Bits in rbits */
#define SC_RCV_BITS     (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                            char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
                           unsigned long arg);
static void ppp_async_process(unsigned long arg);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound);

static const struct ppp_channel_ops async_ops = {
        .start_xmit = ppp_async_send,
        .ioctl      = ppp_async_ioctl,
};
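/*
 * For reference (a sketch, per RFC 1662 - not taken from this file):
 * a PPP frame on an async line looks roughly like
 *
 *      7e ff 03 <proto> <payload...> <fcs-lo> <fcs-hi> 7e
 *
 * i.e. a flag byte (PPP_FLAG, 0x7e), the all-stations address (0xff)
 * and UI control byte (0x03), a 16-bit protocol field, the payload,
 * a 16-bit FCS sent low byte first, and a closing flag.  The
 * address/control and high protocol bytes may be elided once the
 * peers negotiate A/C and protocol field compression, and any byte
 * matching the transmit async control character map (xaccm) is sent
 * as the two-byte sequence 0x7d, <byte> ^ 0x20.
 */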
/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true. The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
        struct asyncppp *ap;

        read_lock(&disc_data_lock);
        ap = tty->disc_data;
        if (ap != NULL)
                atomic_inc(&ap->refcnt);
        read_unlock(&disc_data_lock);
        return ap;
}

static void ap_put(struct asyncppp *ap)
{
        if (atomic_dec_and_test(&ap->refcnt))
                up(&ap->dead_sem);
}

/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
        struct asyncppp *ap;
        int err;
        int speed;

        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;

        err = -ENOMEM;
        ap = kzalloc(sizeof(*ap), GFP_KERNEL);
        if (!ap)
                goto out;

        /* initialize the asyncppp structure */
        ap->tty = tty;
        ap->mru = PPP_MRU;
        spin_lock_init(&ap->xmit_lock);
        spin_lock_init(&ap->recv_lock);
        ap->xaccm[0] = ~0U;
        ap->xaccm[3] = 0x60000000U;
        ap->raccm = ~0U;
        ap->optr = ap->obuf;
        ap->olim = ap->obuf;
        ap->lcp_fcs = -1;

        skb_queue_head_init(&ap->rqueue);
        tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);

        atomic_set(&ap->refcnt, 1);
        sema_init(&ap->dead_sem, 0);

        ap->chan.private = ap;
        ap->chan.ops = &async_ops;
        ap->chan.mtu = PPP_MRU;
        speed = tty_get_baud_rate(tty);
        ap->chan.speed = speed;
        err = ppp_register_channel(&ap->chan);
        if (err)
                goto out_free;

        tty->disc_data = ap;
        tty->receive_room = 65536;
        return 0;

 out_free:
        kfree(ap);
 out:
        return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
        struct asyncppp *ap;

        write_lock_irq(&disc_data_lock);
        ap = tty->disc_data;
        tty->disc_data = NULL;
        write_unlock_irq(&disc_data_lock);
        if (!ap)
                return;

        /*
         * We have now ensured that nobody can start using ap from now
         * on, but we have to wait for all existing users to finish.
         * Note that ppp_unregister_channel ensures that no calls to
         * our channel ops (i.e. ppp_async_send/ioctl) are in progress
         * by the time it returns.
         */
        if (!atomic_dec_and_test(&ap->refcnt))
                down(&ap->dead_sem);
        tasklet_kill(&ap->tsk);

        ppp_unregister_channel(&ap->chan);
        kfree_skb(ap->rpkt);
        skb_queue_purge(&ap->rqueue);
        kfree_skb(ap->tpkt);
        kfree(ap);
}
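/*
 * Example of the interleaving the refcount guards against: CPU 0 is
 * inside ppp_asynctty_receive() holding a reference from ap_get()
 * when CPU 1 enters ppp_asynctty_close().  close() clears
 * tty->disc_data (so no new users can appear) and drops the initial
 * reference; since CPU 0 still holds one, atomic_dec_and_test()
 * returns false and close() sleeps on dead_sem until CPU 0's
 * ap_put() releases the last reference and ups the semaphore.
 */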
/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to the driver to complete and unregister the PPP channel.
 * This is already done by the close routine, so just call that.
 */
static int ppp_asynctty_hangup(struct tty_struct *tty)
{
        ppp_asynctty_close(tty);
        return 0;
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
                  unsigned char __user *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
                   const unsigned char *buf, size_t count)
{
        return -EAGAIN;
}

/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */

static int
ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = ap_get(tty);
        int err, val;
        int __user *p = (int __user *)arg;

        if (!ap)
                return -ENXIO;
        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGCHAN:
                err = -EFAULT;
                if (put_user(ppp_channel_index(&ap->chan), p))
                        break;
                err = 0;
                break;

        case PPPIOCGUNIT:
                err = -EFAULT;
                if (put_user(ppp_unit_number(&ap->chan), p))
                        break;
                err = 0;
                break;

        case TCFLSH:
                /* flush our buffers and the serial port's buffer */
                if (arg == TCIOFLUSH || arg == TCOFLUSH)
                        ppp_async_flush_output(ap);
                err = n_tty_ioctl_helper(tty, file, cmd, arg);
                break;

        case FIONREAD:
                val = 0;
                if (put_user(val, p))
                        break;
                err = 0;
                break;

        default:
                /* Try the various mode ioctls */
                err = tty_mode_ioctl(tty, file, cmd, arg);
        }

        ap_put(ap);
        return err;
}

/* No kernel lock - fine */
static unsigned int
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
        return 0;
}

/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
                     char *cflags, int count)
{
        struct asyncppp *ap = ap_get(tty);
        unsigned long flags;

        if (!ap)
                return;
        spin_lock_irqsave(&ap->recv_lock, flags);
        ppp_async_input(ap, buf, cflags, count);
        spin_unlock_irqrestore(&ap->recv_lock, flags);
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
        tty_unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
        struct asyncppp *ap = ap_get(tty);

        clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        if (!ap)
                return;
        set_bit(XMIT_WAKEUP, &ap->xmit_flags);
        tasklet_schedule(&ap->tsk);
        ap_put(ap);
}


static struct tty_ldisc_ops ppp_ldisc = {
        .owner  = THIS_MODULE,
        .magic  = TTY_LDISC_MAGIC,
        .name   = "ppp",
        .open   = ppp_asynctty_open,
        .close  = ppp_asynctty_close,
        .hangup = ppp_asynctty_hangup,
        .read   = ppp_asynctty_read,
        .write  = ppp_asynctty_write,
        .ioctl  = ppp_asynctty_ioctl,
        .poll   = ppp_asynctty_poll,
        .receive_buf = ppp_asynctty_receive,
        .write_wakeup = ppp_asynctty_wakeup,
};
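/*
 * Usage sketch (userspace, not part of this driver): pppd attaches
 * this line discipline to an open serial fd with something like
 *
 *      int disc = N_PPP;
 *      ioctl(ttyfd, TIOCSETD, &disc);
 *
 * which ends up in ppp_asynctty_open() above; packets themselves are
 * then read and written via /dev/ppp rather than through the tty.
 */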
static int __init
ppp_async_init(void)
{
        int err;

        err = tty_register_ldisc(N_PPP, &ppp_ldisc);
        if (err != 0)
                printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
                       err);
        return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
        struct asyncppp *ap = chan->private;
        void __user *argp = (void __user *)arg;
        int __user *p = argp;
        int err, val;
        u32 accm[8];

        err = -EFAULT;
        switch (cmd) {
        case PPPIOCGFLAGS:
                val = ap->flags | ap->rbits;
                if (put_user(val, p))
                        break;
                err = 0;
                break;
        case PPPIOCSFLAGS:
                if (get_user(val, p))
                        break;
                ap->flags = val & ~SC_RCV_BITS;
                spin_lock_irq(&ap->recv_lock);
                ap->rbits = val & SC_RCV_BITS;
                spin_unlock_irq(&ap->recv_lock);
                err = 0;
                break;

        case PPPIOCGASYNCMAP:
                if (put_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSASYNCMAP:
                if (get_user(ap->xaccm[0], (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGRASYNCMAP:
                if (put_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;
        case PPPIOCSRASYNCMAP:
                if (get_user(ap->raccm, (u32 __user *)argp))
                        break;
                err = 0;
                break;

        case PPPIOCGXASYNCMAP:
                if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
                        break;
                err = 0;
                break;
        case PPPIOCSXASYNCMAP:
                if (copy_from_user(accm, argp, sizeof(accm)))
                        break;
                accm[2] &= ~0x40000000U;        /* can't escape 0x5e */
                accm[3] |= 0x60000000U;         /* must escape 0x7d, 0x7e */
                memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
                err = 0;
                break;

        case PPPIOCGMRU:
                if (put_user(ap->mru, p))
                        break;
                err = 0;
                break;
        case PPPIOCSMRU:
                if (get_user(val, p))
                        break;
                if (val < PPP_MRU)
                        val = PPP_MRU;
                ap->mru = val;
                err = 0;
                break;

        default:
                err = -ENOTTY;
        }

        return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(unsigned long arg)
{
        struct asyncppp *ap = (struct asyncppp *) arg;
        struct sk_buff *skb;

        /* process received packets */
        while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
                if (skb->cb[0])
                        ppp_input_error(&ap->chan, 0);
                ppp_input(&ap->chan, skb);
        }

        /* try to push more stuff out */
        if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
                ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)     do {            \
        if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
                *buf++ = PPP_ESCAPE;                    \
                *buf++ = c ^ PPP_TRANS;                 \
        } else                                          \
                *buf++ = c;                             \
} while (0)
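/*
 * Worked example of the stuffing rule above: with the default
 * transmit ACCM (xaccm[0] = ~0, so all of 0x00-0x1f are escaped,
 * plus the mandatory 0x7d and 0x7e in xaccm[3]), the bytes
 * 7e 41 7d 13 go out as 7d 5e 41 7d 5d 7d 33 - each mapped byte is
 * replaced by PPP_ESCAPE (0x7d) followed by the byte xor PPP_TRANS
 * (0x20).  For LCP packets (islcp), every control character below
 * 0x20 is escaped regardless of the negotiated ACCM.
 */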
static int
ppp_async_encode(struct asyncppp *ap)
{
        int fcs, i, count, c, proto;
        unsigned char *buf, *buflim;
        unsigned char *data;
        int islcp;

        buf = ap->obuf;
        ap->olim = buf;
        ap->optr = buf;
        i = ap->tpkt_pos;
        data = ap->tpkt->data;
        count = ap->tpkt->len;
        fcs = ap->tfcs;
        proto = get_unaligned_be16(data);

        /*
         * LCP packets with code values between 1 (configure-request)
         * and 7 (code-reject) must be sent as though no options
         * had been negotiated.
         */
        islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

        if (i == 0) {
                if (islcp)
                        async_lcp_peek(ap, data, count, 0);

                /*
                 * Start of a new packet - insert the leading FLAG
                 * character if necessary.
                 */
                if (islcp || flag_time == 0 ||
                    time_after_eq(jiffies, ap->last_xmit + flag_time))
                        *buf++ = PPP_FLAG;
                ap->last_xmit = jiffies;
                fcs = PPP_INITFCS;

                /*
                 * Put in the address/control bytes if necessary
                 */
                if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
                        PUT_BYTE(ap, buf, 0xff, islcp);
                        fcs = PPP_FCS(fcs, 0xff);
                        PUT_BYTE(ap, buf, 0x03, islcp);
                        fcs = PPP_FCS(fcs, 0x03);
                }
        }

        /*
         * Once we put in the last byte, we need to put in the FCS
         * and closing flag, so make sure there are at least 7 bytes
         * of free space in the output buffer.
         */
        buflim = ap->obuf + OBUFSIZE - 6;
        while (i < count && buf < buflim) {
                c = data[i++];
                if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
                        continue;       /* compress protocol field */
                fcs = PPP_FCS(fcs, c);
                PUT_BYTE(ap, buf, c, islcp);
        }

        if (i < count) {
                /*
                 * Remember where we are up to in this packet.
                 */
                ap->olim = buf;
                ap->tpkt_pos = i;
                ap->tfcs = fcs;
                return 0;
        }

        /*
         * We have finished the packet.  Add the FCS and flag.
         */
        fcs = ~fcs;
        c = fcs & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        c = (fcs >> 8) & 0xff;
        PUT_BYTE(ap, buf, c, islcp);
        *buf++ = PPP_FLAG;
        ap->olim = buf;

        consume_skb(ap->tpkt);
        ap->tpkt = NULL;
        return 1;
}

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
        struct asyncppp *ap = chan->private;

        ppp_async_push(ap);

        if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
                return 0;       /* already full */
        ap->tpkt = skb;
        ap->tpkt_pos = 0;

        ppp_async_push(ap);
        return 1;
}
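/*
 * Note on the handshake with ppp_generic: returning 0 from
 * ppp_async_send() means "channel busy, retain the skb"; the generic
 * layer will not hand us another packet until we call
 * ppp_output_wakeup().  XMIT_FULL is set while we own a packet
 * (ap->tpkt) and is cleared by ppp_async_push()/ppp_async_flush_output()
 * once that packet has been fully encoded or discarded.
 */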
/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
        int avail, sent, done = 0;
        struct tty_struct *tty = ap->tty;
        int tty_stuffed = 0;

        /*
         * We can get called recursively here if the tty write
         * function calls our wakeup function.  This can happen
         * for example on a pty with both the master and slave
         * set to PPP line discipline.
         * We use the XMIT_BUSY bit to detect this and get out,
         * leaving the XMIT_WAKEUP bit set to tell the other
         * instance that it may now be able to write more.
         */
        if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                return 0;
        spin_lock_bh(&ap->xmit_lock);
        for (;;) {
                if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->optr < ap->olim) {
                        avail = ap->olim - ap->optr;
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
                        sent = tty->ops->write(tty, ap->optr, avail);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        ap->optr += sent;
                        if (sent < avail)
                                tty_stuffed = 1;
                        continue;
                }
                if (ap->optr >= ap->olim && ap->tpkt) {
                        if (ppp_async_encode(ap)) {
                                /* finished processing ap->tpkt */
                                clear_bit(XMIT_FULL, &ap->xmit_flags);
                                done = 1;
                        }
                        continue;
                }
                /*
                 * We haven't made any progress this time around.
                 * Clear XMIT_BUSY to let other callers in, but
                 * after doing so we have to check if anyone set
                 * XMIT_WAKEUP since we last checked it.  If they
                 * did, we should try again to set XMIT_BUSY and go
                 * around again in case XMIT_BUSY was still set when
                 * the other caller tried.
                 */
                clear_bit(XMIT_BUSY, &ap->xmit_flags);
                /* any more work to do? if not, exit the loop */
                if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
                      (!tty_stuffed && ap->tpkt)))
                        break;
                /* more work to do, see if we can do it now */
                if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
                        break;
        }
        spin_unlock_bh(&ap->xmit_lock);
        return done;

 flush:
        clear_bit(XMIT_BUSY, &ap->xmit_flags);
        if (ap->tpkt) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        ap->optr = ap->olim;
        spin_unlock_bh(&ap->xmit_lock);
        return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
        int done = 0;

        spin_lock_bh(&ap->xmit_lock);
        ap->optr = ap->olim;
        if (ap->tpkt != NULL) {
                kfree_skb(ap->tpkt);
                ap->tpkt = NULL;
                clear_bit(XMIT_FULL, &ap->xmit_flags);
                done = 1;
        }
        spin_unlock_bh(&ap->xmit_lock);
        if (done)
                ppp_output_wakeup(&ap->chan);
}
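/*
 * Summary of the transmit path: ppp_generic hands us one skb at a
 * time via ppp_async_send(); ppp_async_encode() stuffs it into obuf;
 * ppp_async_push() drains obuf into the tty driver.  When the tty
 * driver can take more data it calls ppp_asynctty_wakeup(), which
 * sets XMIT_WAKEUP and schedules the tasklet; ppp_async_process()
 * then calls ppp_async_push() again and, once a whole packet has
 * been accepted, ppp_output_wakeup() to ask ppp_generic for more.
 */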
/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
        int i, c;

        for (i = 0; i < count; ++i) {
                c = buf[i];
                if (c == PPP_ESCAPE || c == PPP_FLAG ||
                    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
                        break;
        }
        return i;
}

/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
        struct sk_buff *skb;
        unsigned char *p;
        unsigned int len, fcs, proto;

        skb = ap->rpkt;
        if (ap->state & (SC_TOSS | SC_ESCAPE))
                goto err;

        if (skb == NULL)
                return;         /* 0-length packet */

        /* check the FCS */
        p = skb->data;
        len = skb->len;
        if (len < 3)
                goto err;       /* too short */
        fcs = PPP_INITFCS;
        for (; len > 0; --len)
                fcs = PPP_FCS(fcs, *p++);
        if (fcs != PPP_GOODFCS)
                goto err;       /* bad FCS */
        skb_trim(skb, skb->len - 2);

        /* check for address/control and protocol compression */
        p = skb->data;
        if (p[0] == PPP_ALLSTATIONS) {
                /* chop off address/control */
                if (p[1] != PPP_UI || skb->len < 3)
                        goto err;
                p = skb_pull(skb, 2);
        }
        proto = p[0];
        if (proto & 1) {
                /* protocol is compressed */
                skb_push(skb, 1)[0] = 0;
        } else {
                if (skb->len < 2)
                        goto err;
                proto = (proto << 8) + p[1];
                if (proto == PPP_LCP)
                        async_lcp_peek(ap, p, skb->len, 1);
        }

        /* queue the frame to be processed */
        skb->cb[0] = ap->state;
        skb_queue_tail(&ap->rqueue, skb);
        ap->rpkt = NULL;
        ap->state = 0;
        return;

 err:
        /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
        ap->state = SC_PREV_ERROR;
        if (skb) {
                /* make skb appear as freshly allocated */
                skb_trim(skb, 0);
                skb_reserve(skb, -skb_headroom(skb));
        }
}
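/*
 * Aside on the FCS check above: running the CRC over the received
 * data *including* the two trailing FCS bytes yields the fixed
 * residue PPP_GOODFCS (0xf0b8) when the frame is intact, so there is
 * no need to recompute the expected FCS and compare it byte for
 * byte.  The sender appends ~fcs low byte first (see
 * ppp_async_encode), and the receiver's loop lands on 0xf0b8.
 */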
/* Called when the tty driver has data for us.  Runs parallel with the
   other ldisc functions but will not be re-entered */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
                char *flags, int count)
{
        struct sk_buff *skb;
        int c, i, j, n, s, f;
        unsigned char *sp;

        /* update bits used for 8-bit cleanness detection */
        if (~ap->rbits & SC_RCV_BITS) {
                s = 0;
                for (i = 0; i < count; ++i) {
                        c = buf[i];
                        if (flags && flags[i] != 0)
                                continue;
                        s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
                        c = ((c >> 4) ^ c) & 0xf;
                        s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
                }
                ap->rbits |= s;
        }

        while (count > 0) {
                /* scan through and see how many chars we can do in bulk */
                if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
                        n = 1;
                else
                        n = scan_ordinary(ap, buf, count);

                f = 0;
                if (flags && (ap->state & SC_TOSS) == 0) {
                        /* check the flags to see if any char had an error */
                        for (j = 0; j < n; ++j)
                                if ((f = flags[j]) != 0)
                                        break;
                }
                if (f != 0) {
                        /* start tossing */
                        ap->state |= SC_TOSS;

                } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
                        /* stuff the chars in the skb */
                        skb = ap->rpkt;
                        if (!skb) {
                                skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
                                if (!skb)
                                        goto nomem;
                                ap->rpkt = skb;
                        }
                        if (skb->len == 0) {
                                /* Try to get the payload 4-byte aligned.
                                 * This should match the
                                 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
                                 * process_input_packet, but we do not have
                                 * enough chars here to test buf[1] and buf[2].
                                 */
                                if (buf[0] != PPP_ALLSTATIONS)
                                        skb_reserve(skb, 2 + (buf[0] & 1));
                        }
                        if (n > skb_tailroom(skb)) {
                                /* packet overflowed MRU */
                                ap->state |= SC_TOSS;
                        } else {
                                sp = skb_put(skb, n);
                                memcpy(sp, buf, n);
                                if (ap->state & SC_ESCAPE) {
                                        sp[0] ^= PPP_TRANS;
                                        ap->state &= ~SC_ESCAPE;
                                }
                        }
                }

                if (n >= count)
                        break;

                c = buf[n];
                if (flags != NULL && flags[n] != 0) {
                        ap->state |= SC_TOSS;
                } else if (c == PPP_FLAG) {
                        process_input_packet(ap);
                } else if (c == PPP_ESCAPE) {
                        ap->state |= SC_ESCAPE;
                } else if (I_IXON(ap->tty)) {
                        if (c == START_CHAR(ap->tty))
                                start_tty(ap->tty);
                        else if (c == STOP_CHAR(ap->tty))
                                stop_tty(ap->tty);
                }
                /* otherwise it's a char in the recv ACCM */
                ++n;

                buf += n;
                if (flags)
                        flags += n;
                count -= n;
        }
        return;

 nomem:
        printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
        ap->state |= SC_TOSS;
}

/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ         1       /* LCP code field values */
#define CONFACK         2
#define LCP_MRU         1       /* LCP option numbers */
#define LCP_ASYNCMAP    2
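/*
 * Layout reminder (a sketch, per RFC 1661 - not taken from this
 * file): an LCP packet looks like
 *
 *      <proto c0 21> <code> <id> <len-hi> <len-lo> <options...>
 *
 * where each option is <type> <length> <data...>.  For example,
 * c0 21 02 01 00 08 01 04 05 dc is a configure-ack (code 2, id 1,
 * length 8) carrying a single MRU option (type 1, length 4) with
 * value 0x05dc = 1500.
 */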
static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
                           int len, int inbound)
{
        int dlen, fcs, i, code;
        u32 val;

        data += 2;              /* skip protocol bytes */
        len -= 2;
        if (len < 4)            /* 4 = code, ID, length */
                return;
        code = data[0];
        if (code != CONFACK && code != CONFREQ)
                return;
        dlen = get_unaligned_be16(data + 2);
        if (len < dlen)
                return;         /* packet got truncated or length is bogus */

        if (code == (inbound? CONFACK: CONFREQ)) {
                /*
                 * sent confreq or received confack:
                 * calculate the crc of the data from the ID field on.
                 */
                fcs = PPP_INITFCS;
                for (i = 1; i < dlen; ++i)
                        fcs = PPP_FCS(fcs, data[i]);

                if (!inbound) {
                        /* outbound confreq - remember the crc for later */
                        ap->lcp_fcs = fcs;
                        return;
                }

                /* received confack, check the crc */
                fcs ^= ap->lcp_fcs;
                ap->lcp_fcs = -1;
                if (fcs != 0)
                        return;
        } else if (inbound)
                return; /* not interested in received confreq */

        /* process the options in the confack */
        data += 4;
        dlen -= 4;
        /* data[0] is code, data[1] is length */
        while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
                switch (data[0]) {
                case LCP_MRU:
                        val = get_unaligned_be16(data + 2);
                        if (inbound)
                                ap->mru = val;
                        else
                                ap->chan.mtu = val;
                        break;
                case LCP_ASYNCMAP:
                        val = get_unaligned_be32(data + 2);
                        if (inbound)
                                ap->raccm = val;
                        else
                                ap->xaccm[0] = val;
                        break;
                }
                dlen -= data[1];
                data += data[1];
        }
}

static void __exit ppp_async_cleanup(void)
{
        if (tty_unregister_ldisc(N_PPP) != 0)
                printk(KERN_ERR "failed to unregister PPP line discipline\n");
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);