// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PPP async serial channel driver for Linux.
 *
 * Copyright 1999 Paul Mackerras.
 *
 * This driver provides the encapsulation and framing for sending
 * and receiving PPP frames over async serial lines.  It relies on
 * the generic PPP layer to give it frames to send and to process
 * received frames.  It implements the PPP line discipline.
 *
 * Part of the code in this driver was inspired by the old async-only
 * PPP driver, written by Michael Callahan and Al Longyear, and
 * subsequently hacked by Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tty.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/crc-ccitt.h>
#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/ppp_channel.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <asm/string.h>

#define PPP_VERSION	"2.4.2"

#define OBUFSIZE	4096

/* Structure for storing local state. */
struct asyncppp {
	struct tty_struct *tty;
	unsigned int	flags;
	unsigned int	state;
	unsigned int	rbits;
	int		mru;
	spinlock_t	xmit_lock;
	spinlock_t	recv_lock;
	unsigned long	xmit_flags;
	u32		xaccm[8];
	u32		raccm;
	unsigned int	bytes_sent;
	unsigned int	bytes_rcvd;

	struct sk_buff	*tpkt;
	int		tpkt_pos;
	u16		tfcs;
	unsigned char	*optr;
	unsigned char	*olim;
	unsigned long	last_xmit;

	struct sk_buff	*rpkt;
	int		lcp_fcs;
	struct sk_buff_head rqueue;

	struct tasklet_struct tsk;

	refcount_t	refcnt;
	struct completion dead;
	struct ppp_channel chan;	/* interface to generic ppp layer */
	unsigned char	obuf[OBUFSIZE];
};
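
/*
 * Note on the transmit buffering above: obuf holds the HDLC-encoded
 * form of the frame in tpkt.  ppp_async_encode() fills obuf and
 * advances olim; ppp_async_push() writes the bytes between optr and
 * olim to the tty, advancing optr as the tty accepts them.  Once optr
 * catches up with olim, the next chunk of tpkt can be encoded.
 */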

/* Bit numbers in xmit_flags */
#define XMIT_WAKEUP	0
#define XMIT_FULL	1
#define XMIT_BUSY	2

/* State bits */
#define SC_TOSS		1
#define SC_ESCAPE	2
#define SC_PREV_ERROR	4

/* Bits in rbits */
#define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)

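/*
 * flag_time controls the leading FLAG byte: if the previous frame went
 * out less than flag_time ticks ago, its closing FLAG also serves as
 * this frame's opening FLAG (see ppp_async_encode()).  A value of 0
 * puts a leading FLAG on every frame.
 */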
static int flag_time = HZ;
module_param(flag_time, int, 0);
MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_LDISC(N_PPP);

/*
 * Prototypes.
 */
static int ppp_async_encode(struct asyncppp *ap);
static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
			    const char *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
			   unsigned long arg);
static void ppp_async_process(struct tasklet_struct *t);

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound);

static const struct ppp_channel_ops async_ops = {
	.start_xmit = ppp_async_send,
	.ioctl      = ppp_async_ioctl,
};
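
/*
 * These ops are invoked by the generic PPP layer: start_xmit receives a
 * complete PPP frame starting with the 2-byte protocol field (framing,
 * escaping and the FCS are added here), and ioctl handles channel-level
 * configuration such as the async character maps.
 */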

/*
 * Routines implementing the PPP line discipline.
 */

/*
 * We have a potential race on dereferencing tty->disc_data,
 * because the tty layer provides no locking at all - thus one
 * cpu could be running ppp_asynctty_receive while another
 * calls ppp_asynctty_close, which zeroes tty->disc_data and
 * frees the memory that ppp_asynctty_receive is using.  The best
 * way to fix this is to use a rwlock in the tty struct, but for now
 * we use a single global rwlock for all ttys in ppp line discipline.
 *
 * FIXME: this is no longer true. The _close path for the ldisc is
 * now guaranteed to be sane.
 */
static DEFINE_RWLOCK(disc_data_lock);

static struct asyncppp *ap_get(struct tty_struct *tty)
{
	struct asyncppp *ap;

	read_lock(&disc_data_lock);
	ap = tty->disc_data;
	if (ap != NULL)
		refcount_inc(&ap->refcnt);
	read_unlock(&disc_data_lock);
	return ap;
}

static void ap_put(struct asyncppp *ap)
{
	if (refcount_dec_and_test(&ap->refcnt))
		complete(&ap->dead);
}
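
/*
 * Lifetime rule: ppp_asynctty_open publishes ap via tty->disc_data with
 * refcnt 1.  ppp_asynctty_close unpublishes it, drops that reference,
 * and sleeps on ap->dead until the last concurrent ap_get() user calls
 * ap_put(), so ap is never freed while another cpu is still using it.
 */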

/*
 * Called when a tty is put into PPP line discipline. Called in process
 * context.
 */
static int
ppp_asynctty_open(struct tty_struct *tty)
{
	struct asyncppp *ap;
	int err;
	int speed;

	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;

	err = -ENOMEM;
	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		goto out;

	/* initialize the asyncppp structure */
	ap->tty = tty;
	ap->mru = PPP_MRU;
	spin_lock_init(&ap->xmit_lock);
	spin_lock_init(&ap->recv_lock);
	ap->xaccm[0] = ~0U;
	ap->xaccm[3] = 0x60000000U;
	ap->raccm = ~0U;
	ap->optr = ap->obuf;
	ap->olim = ap->obuf;
	ap->lcp_fcs = -1;

	skb_queue_head_init(&ap->rqueue);
	tasklet_setup(&ap->tsk, ppp_async_process);

	refcount_set(&ap->refcnt, 1);
	init_completion(&ap->dead);

	ap->chan.private = ap;
	ap->chan.ops = &async_ops;
	ap->chan.mtu = PPP_MRU;
	speed = tty_get_baud_rate(tty);
	ap->chan.speed = speed;
	err = ppp_register_channel(&ap->chan);
	if (err)
		goto out_free;

	tty->disc_data = ap;
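	/*
	 * receive_room tells the tty layer how much data it may pass to
	 * our receive_buf in one call; 65536 effectively asks the driver
	 * not to throttle us.
	 */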
	tty->receive_room = 65536;
	return 0;

 out_free:
	kfree(ap);
 out:
	return err;
}

/*
 * Called when the tty is put into another line discipline
 * or it hangs up.  We have to wait for any cpu currently
 * executing in any of the other ppp_asynctty_* routines to
 * finish before we can call ppp_unregister_channel and free
 * the asyncppp struct.  This routine must be called from
 * process context, not interrupt or softirq context.
 */
static void
ppp_asynctty_close(struct tty_struct *tty)
{
	struct asyncppp *ap;

	write_lock_irq(&disc_data_lock);
	ap = tty->disc_data;
	tty->disc_data = NULL;
	write_unlock_irq(&disc_data_lock);
	if (!ap)
		return;

	/*
	 * We have now ensured that nobody can start using ap from now
	 * on, but we have to wait for all existing users to finish.
	 * Note that ppp_unregister_channel ensures that no calls to
	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
	 * by the time it returns.
	 */
	if (!refcount_dec_and_test(&ap->refcnt))
		wait_for_completion(&ap->dead);
	tasklet_kill(&ap->tsk);

	ppp_unregister_channel(&ap->chan);
	kfree_skb(ap->rpkt);
	skb_queue_purge(&ap->rqueue);
	kfree_skb(ap->tpkt);
	kfree(ap);
}

/*
 * Called on tty hangup in process context.
 *
 * Wait for I/O to driver to complete and unregister PPP channel.
 * This is already done by the close routine, so just call that.
 */
static void ppp_asynctty_hangup(struct tty_struct *tty)
{
	ppp_asynctty_close(tty);
}

/*
 * Read does nothing - no data is ever available this way.
 * Pppd reads and writes packets via /dev/ppp instead.
 */
static ssize_t
ppp_asynctty_read(struct tty_struct *tty, struct file *file,
		  unsigned char *buf, size_t count,
		  void **cookie, unsigned long offset)
{
	return -EAGAIN;
}

/*
 * Write on the tty does nothing, the packets all come in
 * from the ppp generic stuff.
 */
static ssize_t
ppp_asynctty_write(struct tty_struct *tty, struct file *file,
		   const unsigned char *buf, size_t count)
{
	return -EAGAIN;
}

/*
 * Called in process context only. May be re-entered by multiple
 * ioctl calling threads.
 */

static int
ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = ap_get(tty);
	int err, val;
	int __user *p = (int __user *)arg;

	if (!ap)
		return -ENXIO;
	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGCHAN:
		err = -EFAULT;
		if (put_user(ppp_channel_index(&ap->chan), p))
			break;
		err = 0;
		break;

	case PPPIOCGUNIT:
		err = -EFAULT;
		if (put_user(ppp_unit_number(&ap->chan), p))
			break;
		err = 0;
		break;

	case TCFLSH:
		/* flush our buffers and the serial port's buffer */
		if (arg == TCIOFLUSH || arg == TCOFLUSH)
			ppp_async_flush_output(ap);
		err = n_tty_ioctl_helper(tty, cmd, arg);
		break;

	case FIONREAD:
		val = 0;
		if (put_user(val, p))
			break;
		err = 0;
		break;

	default:
		/* Try the various mode ioctls */
		err = tty_mode_ioctl(tty, cmd, arg);
	}

	ap_put(ap);
	return err;
}

/* No kernel lock - fine */
static __poll_t
ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
{
	return 0;
}

/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
		  const char *cflags, int count)
{
	struct asyncppp *ap = ap_get(tty);
	unsigned long flags;

	if (!ap)
		return;
	spin_lock_irqsave(&ap->recv_lock, flags);
	ppp_async_input(ap, buf, cflags, count);
	spin_unlock_irqrestore(&ap->recv_lock, flags);
	if (!skb_queue_empty(&ap->rqueue))
		tasklet_schedule(&ap->tsk);
	ap_put(ap);
	tty_unthrottle(tty);
}

static void
ppp_asynctty_wakeup(struct tty_struct *tty)
{
	struct asyncppp *ap = ap_get(tty);

	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	if (!ap)
		return;
	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
	tasklet_schedule(&ap->tsk);
	ap_put(ap);
}


static struct tty_ldisc_ops ppp_ldisc = {
	.owner  = THIS_MODULE,
	.num	= N_PPP,
	.name	= "ppp",
	.open	= ppp_asynctty_open,
	.close	= ppp_asynctty_close,
	.hangup	= ppp_asynctty_hangup,
	.read	= ppp_asynctty_read,
	.write	= ppp_asynctty_write,
	.ioctl	= ppp_asynctty_ioctl,
	.poll	= ppp_asynctty_poll,
	.receive_buf = ppp_asynctty_receive,
	.write_wakeup = ppp_asynctty_wakeup,
};

static int __init
ppp_async_init(void)
{
	int err;

	err = tty_register_ldisc(&ppp_ldisc);
	if (err != 0)
		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
		       err);
	return err;
}

/*
 * The following routines provide the PPP channel interface.
 */
static int
ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
{
	struct asyncppp *ap = chan->private;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int err, val;
	u32 accm[8];

	err = -EFAULT;
	switch (cmd) {
	case PPPIOCGFLAGS:
		val = ap->flags | ap->rbits;
		if (put_user(val, p))
			break;
		err = 0;
		break;
	case PPPIOCSFLAGS:
		if (get_user(val, p))
			break;
		ap->flags = val & ~SC_RCV_BITS;
		spin_lock_irq(&ap->recv_lock);
		ap->rbits = val & SC_RCV_BITS;
		spin_unlock_irq(&ap->recv_lock);
		err = 0;
		break;

	case PPPIOCGASYNCMAP:
		if (put_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSASYNCMAP:
		if (get_user(ap->xaccm[0], (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGRASYNCMAP:
		if (put_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;
	case PPPIOCSRASYNCMAP:
		if (get_user(ap->raccm, (u32 __user *)argp))
			break;
		err = 0;
		break;

	case PPPIOCGXASYNCMAP:
		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
			break;
		err = 0;
		break;
	case PPPIOCSXASYNCMAP:
		if (copy_from_user(accm, argp, sizeof(accm)))
			break;
		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
		err = 0;
		break;

	case PPPIOCGMRU:
		if (put_user(ap->mru, p))
			break;
		err = 0;
		break;
	case PPPIOCSMRU:
		if (get_user(val, p))
			break;
		if (val < PPP_MRU)
			val = PPP_MRU;
		ap->mru = val;
		err = 0;
		break;

	default:
		err = -ENOTTY;
	}

	return err;
}

/*
 * This is called at softirq level to deliver received packets
 * to the ppp_generic code, and to tell the ppp_generic code
 * if we can accept more output now.
 */
static void ppp_async_process(struct tasklet_struct *t)
{
	struct asyncppp *ap = from_tasklet(ap, t, tsk);
	struct sk_buff *skb;

	/* process received packets */
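	/* (skb->cb[0] holds the receive-state flags saved with the frame
	 *  by process_input_packet; a nonzero value means an error was
	 *  seen since the last good frame, so report it to ppp_generic) */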
	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
		if (skb->cb[0])
			ppp_input_error(&ap->chan, 0);
		ppp_input(&ap->chan, skb);
	}

	/* try to push more stuff out */
	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
		ppp_output_wakeup(&ap->chan);
}

/*
 * Procedures for encapsulation and framing.
 */

/*
 * Procedure to encode the data for async serial transmission.
 * Does octet stuffing (escaping), puts the address/control bytes
 * on if A/C compression is disabled, and does protocol compression.
 * Assumes ap->tpkt != 0 on entry.
 * Returns 1 if we finished the current frame, 0 otherwise.
 */

#define PUT_BYTE(ap, buf, c, islcp)	do {		\
	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
		*buf++ = PPP_ESCAPE;			\
		*buf++ = c ^ PPP_TRANS;			\
	} else						\
		*buf++ = c;				\
} while (0)
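
/*
 * Worked example: with the default maps, 0x7d (PPP_ESCAPE itself) is
 * sent as the pair 0x7d 0x5d and 0x11 (XON) as 0x7d 0x31 - each
 * escaped byte is XORed with PPP_TRANS (0x20).  For LCP frames (islcp)
 * every control character below 0x20 is escaped regardless of the
 * negotiated map, as RFC 1662 requires.
 */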

static int
ppp_async_encode(struct asyncppp *ap)
{
	int fcs, i, count, c, proto;
	unsigned char *buf, *buflim;
	unsigned char *data;
	int islcp;

	buf = ap->obuf;
	ap->olim = buf;
	ap->optr = buf;
	i = ap->tpkt_pos;
	data = ap->tpkt->data;
	count = ap->tpkt->len;
	fcs = ap->tfcs;
	proto = get_unaligned_be16(data);

	/*
	 * LCP packets with code values between 1 (configure-request)
	 * and 7 (code-reject) must be sent as though no options
	 * had been negotiated.
	 */
	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;

	if (i == 0) {
		if (islcp)
			async_lcp_peek(ap, data, count, 0);

		/*
		 * Start of a new packet - insert the leading FLAG
		 * character if necessary.
		 */
		if (islcp || flag_time == 0 ||
		    time_after_eq(jiffies, ap->last_xmit + flag_time))
			*buf++ = PPP_FLAG;
		ap->last_xmit = jiffies;
		fcs = PPP_INITFCS;

		/*
		 * Put in the address/control bytes if necessary
		 */
		if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
			PUT_BYTE(ap, buf, 0xff, islcp);
			fcs = PPP_FCS(fcs, 0xff);
			PUT_BYTE(ap, buf, 0x03, islcp);
			fcs = PPP_FCS(fcs, 0x03);
		}
	}

	/*
	 * Once we put in the last byte, we need to put in the FCS
	 * and closing flag, so make sure there are at least 7 bytes
	 * of free space in the output buffer: in the worst case the
	 * final data byte and both FCS bytes are escaped (6 bytes)
	 * and the closing flag adds one more.
	 */
	buflim = ap->obuf + OBUFSIZE - 6;
	while (i < count && buf < buflim) {
		c = data[i++];
		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
			continue;	/* compress protocol field */
		fcs = PPP_FCS(fcs, c);
		PUT_BYTE(ap, buf, c, islcp);
	}

	if (i < count) {
		/*
		 * Remember where we are up to in this packet.
		 */
		ap->olim = buf;
		ap->tpkt_pos = i;
		ap->tfcs = fcs;
		return 0;
	}

	/*
	 * We have finished the packet.  Add the FCS and flag.
	 */
	fcs = ~fcs;
	c = fcs & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	c = (fcs >> 8) & 0xff;
	PUT_BYTE(ap, buf, c, islcp);
	*buf++ = PPP_FLAG;
	ap->olim = buf;

	consume_skb(ap->tpkt);
	ap->tpkt = NULL;
	return 1;
}

/*
 * Transmit-side routines.
 */

/*
 * Send a packet to the peer over an async tty line.
 * Returns 1 iff the packet was accepted.
 * If the packet was not accepted, we will call ppp_output_wakeup
 * at some later time.
 */
static int
ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
{
	struct asyncppp *ap = chan->private;

	ppp_async_push(ap);

	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
		return 0;	/* already full */
	ap->tpkt = skb;
	ap->tpkt_pos = 0;

	ppp_async_push(ap);
	return 1;
}

/*
 * Push as much data as possible out to the tty.
 */
static int
ppp_async_push(struct asyncppp *ap)
{
	int avail, sent, done = 0;
	struct tty_struct *tty = ap->tty;
	int tty_stuffed = 0;

	/*
	 * We can get called recursively here if the tty write
	 * function calls our wakeup function.  This can happen
	 * for example on a pty with both the master and slave
	 * set to PPP line discipline.
	 * We use the XMIT_BUSY bit to detect this and get out,
	 * leaving the XMIT_WAKEUP bit set to tell the other
	 * instance that it may be able to write more now.
	 */
	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
		return 0;
	spin_lock_bh(&ap->xmit_lock);
	for (;;) {
		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
			tty_stuffed = 0;
		if (!tty_stuffed && ap->optr < ap->olim) {
			avail = ap->olim - ap->optr;
			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
			sent = tty->ops->write(tty, ap->optr, avail);
			if (sent < 0)
				goto flush;	/* error, e.g. loss of CD */
			ap->optr += sent;
			if (sent < avail)
				tty_stuffed = 1;
			continue;
		}
		if (ap->optr >= ap->olim && ap->tpkt) {
			if (ppp_async_encode(ap)) {
				/* finished processing ap->tpkt */
				clear_bit(XMIT_FULL, &ap->xmit_flags);
				done = 1;
			}
			continue;
		}
		/*
		 * We haven't made any progress this time around.
		 * Clear XMIT_BUSY to let other callers in, but
		 * after doing so we have to check if anyone set
		 * XMIT_WAKEUP since we last checked it.  If they
		 * did, we should try again to set XMIT_BUSY and go
		 * around again in case XMIT_BUSY was still set when
		 * the other caller tried.
		 */
		clear_bit(XMIT_BUSY, &ap->xmit_flags);
		/* any more work to do? if not, exit the loop */
		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
		      (!tty_stuffed && ap->tpkt)))
			break;
		/* more work to do, see if we can do it now */
		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
			break;
	}
	spin_unlock_bh(&ap->xmit_lock);
	return done;

flush:
	clear_bit(XMIT_BUSY, &ap->xmit_flags);
	if (ap->tpkt) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	ap->optr = ap->olim;
	spin_unlock_bh(&ap->xmit_lock);
	return done;
}

/*
 * Flush output from our internal buffers.
 * Called for the TCFLSH ioctl. Can be entered in parallel
 * but this is covered by the xmit_lock.
 */
static void
ppp_async_flush_output(struct asyncppp *ap)
{
	int done = 0;

	spin_lock_bh(&ap->xmit_lock);
	ap->optr = ap->olim;
	if (ap->tpkt != NULL) {
		kfree_skb(ap->tpkt);
		ap->tpkt = NULL;
		clear_bit(XMIT_FULL, &ap->xmit_flags);
		done = 1;
	}
	spin_unlock_bh(&ap->xmit_lock);
	if (done)
		ppp_output_wakeup(&ap->chan);
}

/*
 * Receive-side routines.
 */

/* see how many ordinary chars there are at the start of buf */
static inline int
scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
{
	int i, c;

	for (i = 0; i < count; ++i) {
		c = buf[i];
		if (c == PPP_ESCAPE || c == PPP_FLAG ||
		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
			break;
	}
	return i;
}

/* called when a flag is seen - do end-of-packet processing */
static void
process_input_packet(struct asyncppp *ap)
{
	struct sk_buff *skb;
	unsigned char *p;
	unsigned int len, fcs;

	skb = ap->rpkt;
	if (ap->state & (SC_TOSS | SC_ESCAPE))
		goto err;

	if (skb == NULL)
		return;		/* 0-length packet */

	/* check the FCS */
	p = skb->data;
	len = skb->len;
	if (len < 3)
		goto err;	/* too short */
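	/*
	 * The FCS runs over the payload including the two trailing FCS
	 * octets; an undamaged frame yields the constant PPP_GOODFCS
	 * (0xf0b8, per RFC 1662), after which the FCS octets are trimmed.
	 */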
	fcs = PPP_INITFCS;
	for (; len > 0; --len)
		fcs = PPP_FCS(fcs, *p++);
	if (fcs != PPP_GOODFCS)
		goto err;	/* bad FCS */
	skb_trim(skb, skb->len - 2);

	/* check for address/control and protocol compression */
	p = skb->data;
	if (p[0] == PPP_ALLSTATIONS) {
		/* chop off address/control */
		if (p[1] != PPP_UI || skb->len < 3)
			goto err;
		p = skb_pull(skb, 2);
	}

	/* If protocol field is not compressed, it can be LCP packet */
	if (!(p[0] & 0x01)) {
		unsigned int proto;

		if (skb->len < 2)
			goto err;
		proto = (p[0] << 8) + p[1];
		if (proto == PPP_LCP)
			async_lcp_peek(ap, p, skb->len, 1);
	}

	/* queue the frame to be processed */
	skb->cb[0] = ap->state;
	skb_queue_tail(&ap->rqueue, skb);
	ap->rpkt = NULL;
	ap->state = 0;
	return;

 err:
	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
	ap->state = SC_PREV_ERROR;
	if (skb) {
		/* make skb appear as freshly allocated */
		skb_trim(skb, 0);
		skb_reserve(skb, -skb_headroom(skb));
	}
}

/* Called when the tty driver has data for us. Runs parallel with the
   other ldisc functions but will not be re-entered */

static void
ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
		const char *flags, int count)
{
	struct sk_buff *skb;
	int c, i, j, n, s, f;
	unsigned char *sp;

	/* update bits used for 8-bit cleanness detection */
	if (~ap->rbits & SC_RCV_BITS) {
		s = 0;
		for (i = 0; i < count; ++i) {
			c = buf[i];
			if (flags && flags[i] != 0)
				continue;
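			/*
			 * Fold the byte to 4 bits; 0x6996 then acts as a
			 * 16-entry parity lookup table (bit i is set
			 * exactly when i has odd parity), recording
			 * whether odd or even parity bytes were seen.
			 */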
			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
			c = ((c >> 4) ^ c) & 0xf;
			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
		}
		ap->rbits |= s;
	}

	while (count > 0) {
		/* scan through and see how many chars we can do in bulk */
		if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
			n = 1;
		else
			n = scan_ordinary(ap, buf, count);

		f = 0;
		if (flags && (ap->state & SC_TOSS) == 0) {
			/* check the flags to see if any char had an error */
			for (j = 0; j < n; ++j)
				if ((f = flags[j]) != 0)
					break;
		}
		if (f != 0) {
			/* start tossing */
			ap->state |= SC_TOSS;

		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
			/* stuff the chars in the skb */
			skb = ap->rpkt;
			if (!skb) {
				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
				if (!skb)
					goto nomem;
				ap->rpkt = skb;
			}
			if (skb->len == 0) {
				/* Try to get the payload 4-byte aligned.
				 * This should match the
				 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
				 * process_input_packet, but we do not have
				 * enough chars here to test buf[1] and buf[2].
				 */
				if (buf[0] != PPP_ALLSTATIONS)
					skb_reserve(skb, 2 + (buf[0] & 1));
			}
			if (n > skb_tailroom(skb)) {
				/* packet overflowed MRU */
				ap->state |= SC_TOSS;
			} else {
				sp = skb_put_data(skb, buf, n);
				if (ap->state & SC_ESCAPE) {
					sp[0] ^= PPP_TRANS;
					ap->state &= ~SC_ESCAPE;
				}
			}
		}

		if (n >= count)
			break;

		c = buf[n];
		if (flags != NULL && flags[n] != 0) {
			ap->state |= SC_TOSS;
		} else if (c == PPP_FLAG) {
			process_input_packet(ap);
		} else if (c == PPP_ESCAPE) {
			ap->state |= SC_ESCAPE;
		} else if (I_IXON(ap->tty)) {
			if (c == START_CHAR(ap->tty))
				start_tty(ap->tty);
			else if (c == STOP_CHAR(ap->tty))
				stop_tty(ap->tty);
		}
		/* otherwise it's a char in the recv ACCM */
		++n;

		buf += n;
		if (flags)
			flags += n;
		count -= n;
	}
	return;

 nomem:
	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
	ap->state |= SC_TOSS;
}

/*
 * We look at LCP frames going past so that we can notice
 * and react to the LCP configure-ack from the peer.
 * In the situation where the peer has been sent a configure-ack
 * already, LCP is up once it has sent its configure-ack
 * so the immediately following packet can be sent with the
 * configured LCP options.  This allows us to process the following
 * packet correctly without pppd needing to respond quickly.
 *
 * We only respond to the received configure-ack if we have just
 * sent a configure-request, and the configure-ack contains the
 * same data (this is checked using a 16-bit crc of the data).
 */
#define CONFREQ		1	/* LCP code field values */
#define CONFACK		2
#define LCP_MRU		1	/* LCP option numbers */
#define LCP_ASYNCMAP	2
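
/*
 * An LCP packet, after the 2-byte protocol field, is: code (1 byte),
 * id (1 byte), length (2 bytes, big-endian, covering the whole packet),
 * then options as type/length/value triples - e.g. an MRU of 1500 is
 * the option bytes 01 04 05 dc (RFC 1661).
 */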

static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
			   int len, int inbound)
{
	int dlen, fcs, i, code;
	u32 val;

	data += 2;		/* skip protocol bytes */
	len -= 2;
	if (len < 4)		/* 4 = code, ID, length */
		return;
	code = data[0];
	if (code != CONFACK && code != CONFREQ)
		return;
	dlen = get_unaligned_be16(data + 2);
	if (len < dlen)
		return;		/* packet got truncated or length is bogus */

	if (code == (inbound? CONFACK: CONFREQ)) {
		/*
		 * sent confreq or received confack:
		 * calculate the crc of the data from the ID field on.
		 */
		fcs = PPP_INITFCS;
		for (i = 1; i < dlen; ++i)
			fcs = PPP_FCS(fcs, data[i]);

		if (!inbound) {
			/* outbound confreq - remember the crc for later */
			ap->lcp_fcs = fcs;
			return;
		}

		/* received confack, check the crc */
		fcs ^= ap->lcp_fcs;
		ap->lcp_fcs = -1;
		if (fcs != 0)
			return;
	} else if (inbound)
		return;	/* not interested in received confreq */

	/* process the options in the confack */
	data += 4;
	dlen -= 4;
	/* data[0] is code, data[1] is length */
	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
		switch (data[0]) {
		case LCP_MRU:
			val = get_unaligned_be16(data + 2);
			if (inbound)
				ap->mru = val;
			else
				ap->chan.mtu = val;
			break;
		case LCP_ASYNCMAP:
			val = get_unaligned_be32(data + 2);
			if (inbound)
				ap->raccm = val;
			else
				ap->xaccm[0] = val;
			break;
		}
		dlen -= data[1];
		data += data[1];
	}
}

static void __exit ppp_async_cleanup(void)
{
	tty_unregister_ldisc(&ppp_ldisc);
}

module_init(ppp_async_init);
module_exit(ppp_async_cleanup);