xref: /openbmc/linux/drivers/net/ppp/ppp_async.c (revision c34a8052)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * PPP async serial channel driver for Linux.
4  *
5  * Copyright 1999 Paul Mackerras.
6  *
7  * This driver provides the encapsulation and framing for sending
8  * and receiving PPP frames over async serial lines.  It relies on
9  * the generic PPP layer to give it frames to send and to process
10  * received frames.  It implements the PPP line discipline.
11  *
12  * Part of the code in this driver was inspired by the old async-only
13  * PPP driver, written by Michael Callahan and Al Longyear, and
14  * subsequently hacked by Paul Mackerras.
15  */
16 
17 #include <linux/module.h>
18 #include <linux/kernel.h>
19 #include <linux/skbuff.h>
20 #include <linux/tty.h>
21 #include <linux/netdevice.h>
22 #include <linux/poll.h>
23 #include <linux/crc-ccitt.h>
24 #include <linux/ppp_defs.h>
25 #include <linux/ppp-ioctl.h>
26 #include <linux/ppp_channel.h>
27 #include <linux/spinlock.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/jiffies.h>
31 #include <linux/slab.h>
32 #include <asm/unaligned.h>
33 #include <linux/uaccess.h>
34 #include <asm/string.h>
35 
36 #define PPP_VERSION	"2.4.2"
37 
38 #define OBUFSIZE	4096
39 
40 /* Structure for storing local state. */
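/*
 * Locking overview (derived from the code below): xmit_lock protects the
 * transmit state (tpkt, tpkt_pos, tfcs and the obuf/optr/olim output
 * buffer) and is taken in ppp_async_push() and ppp_async_flush_output();
 * recv_lock protects the receive state (rpkt, state, rbits) and is taken
 * around ppp_async_input() and when rbits is updated from the channel
 * ioctl.  refcnt and dead keep the structure alive until every tty
 * callback has dropped its reference (see ap_get()/ap_put()).
 */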
41 struct asyncppp {
42 	struct tty_struct *tty;
43 	unsigned int	flags;
44 	unsigned int	state;
45 	unsigned int	rbits;
46 	int		mru;
47 	spinlock_t	xmit_lock;
48 	spinlock_t	recv_lock;
49 	unsigned long	xmit_flags;
50 	u32		xaccm[8];
51 	u32		raccm;
52 	unsigned int	bytes_sent;
53 	unsigned int	bytes_rcvd;
54 
55 	struct sk_buff	*tpkt;
56 	int		tpkt_pos;
57 	u16		tfcs;
58 	unsigned char	*optr;
59 	unsigned char	*olim;
60 	unsigned long	last_xmit;
61 
62 	struct sk_buff	*rpkt;
63 	int		lcp_fcs;
64 	struct sk_buff_head rqueue;
65 
66 	struct tasklet_struct tsk;
67 
68 	refcount_t	refcnt;
69 	struct completion dead;
70 	struct ppp_channel chan;	/* interface to generic ppp layer */
71 	unsigned char	obuf[OBUFSIZE];
72 };
73 
74 /* Bit numbers in xmit_flags */
75 #define XMIT_WAKEUP	0
76 #define XMIT_FULL	1
77 #define XMIT_BUSY	2
78 
79 /* State bits */
80 #define SC_TOSS		1
81 #define SC_ESCAPE	2
82 #define SC_PREV_ERROR	4
83 
84 /* Bits in rbits */
85 #define SC_RCV_BITS	(SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
86 
87 static int flag_time = HZ;
88 module_param(flag_time, int, 0);
89 MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
90 MODULE_LICENSE("GPL");
91 MODULE_ALIAS_LDISC(N_PPP);
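/*
 * flag_time == 0 makes ppp_async_encode() emit a leading FLAG byte before
 * every frame instead of sharing the closing flag of a recently sent one.
 * As an illustrative (not prescriptive) example, the module could be
 * loaded with "modprobe ppp_async flag_time=0" to force that behaviour.
 */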
92 
93 /*
94  * Prototypes.
95  */
96 static int ppp_async_encode(struct asyncppp *ap);
97 static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
98 static int ppp_async_push(struct asyncppp *ap);
99 static void ppp_async_flush_output(struct asyncppp *ap);
100 static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
101 			    const u8 *flags, int count);
102 static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
103 			   unsigned long arg);
104 static void ppp_async_process(struct tasklet_struct *t);
105 
106 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
107 			   int len, int inbound);
108 
109 static const struct ppp_channel_ops async_ops = {
110 	.start_xmit = ppp_async_send,
111 	.ioctl      = ppp_async_ioctl,
112 };
113 
114 /*
115  * Routines implementing the PPP line discipline.
116  */
117 
118 /*
119  * We have a potential race on dereferencing tty->disc_data,
120  * because the tty layer provides no locking at all - thus one
121  * cpu could be running ppp_asynctty_receive while another
122  * calls ppp_asynctty_close, which zeroes tty->disc_data and
123  * frees the memory that ppp_asynctty_receive is using.  The best
124  * way to fix this is to use a rwlock in the tty struct, but for now
125  * we use a single global rwlock for all ttys in ppp line discipline.
126  *
127  * FIXME: this is no longer true. The _close path for the ldisc is
128  * now guaranteed to be sane.
129  */
130 static DEFINE_RWLOCK(disc_data_lock);
131 
132 static struct asyncppp *ap_get(struct tty_struct *tty)
133 {
134 	struct asyncppp *ap;
135 
136 	read_lock(&disc_data_lock);
137 	ap = tty->disc_data;
138 	if (ap != NULL)
139 		refcount_inc(&ap->refcnt);
140 	read_unlock(&disc_data_lock);
141 	return ap;
142 }
143 
144 static void ap_put(struct asyncppp *ap)
145 {
146 	if (refcount_dec_and_test(&ap->refcnt))
147 		complete(&ap->dead);
148 }
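/*
 * Typical usage pattern in the tty callbacks below (sketch only); ap_get()
 * returns NULL once the ldisc is being torn down:
 *
 *	struct asyncppp *ap = ap_get(tty);
 *
 *	if (!ap)
 *		return;
 *	... use ap ...
 *	ap_put(ap);
 *
 * ppp_asynctty_close() drops the initial reference and then waits on
 * ap->dead until the last such user has called ap_put().
 */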
149 
150 /*
151  * Called when a tty is put into PPP line discipline. Called in process
152  * context.
153  */
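/*
 * Attaching the discipline is normally done from userspace by pppd; a
 * minimal illustrative sketch (variable names are made up, the ioctls
 * are existing kernel ABI):
 *
 *	int disc = N_PPP, chan_idx;
 *
 *	ioctl(tty_fd, TIOCSETD, &disc);		sets this line discipline
 *	ioctl(tty_fd, PPPIOCGCHAN, &chan_idx);	see ppp_asynctty_ioctl()
 */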
154 static int
155 ppp_asynctty_open(struct tty_struct *tty)
156 {
157 	struct asyncppp *ap;
158 	int err;
159 	int speed;
160 
161 	if (tty->ops->write == NULL)
162 		return -EOPNOTSUPP;
163 
164 	err = -ENOMEM;
165 	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
166 	if (!ap)
167 		goto out;
168 
169 	/* initialize the asyncppp structure */
170 	ap->tty = tty;
171 	ap->mru = PPP_MRU;
172 	spin_lock_init(&ap->xmit_lock);
173 	spin_lock_init(&ap->recv_lock);
174 	ap->xaccm[0] = ~0U;
175 	ap->xaccm[3] = 0x60000000U;
176 	ap->raccm = ~0U;
177 	ap->optr = ap->obuf;
178 	ap->olim = ap->obuf;
179 	ap->lcp_fcs = -1;
180 
181 	skb_queue_head_init(&ap->rqueue);
182 	tasklet_setup(&ap->tsk, ppp_async_process);
183 
184 	refcount_set(&ap->refcnt, 1);
185 	init_completion(&ap->dead);
186 
187 	ap->chan.private = ap;
188 	ap->chan.ops = &async_ops;
189 	ap->chan.mtu = PPP_MRU;
190 	speed = tty_get_baud_rate(tty);
191 	ap->chan.speed = speed;
192 	err = ppp_register_channel(&ap->chan);
193 	if (err)
194 		goto out_free;
195 
196 	tty->disc_data = ap;
197 	tty->receive_room = 65536;
198 	return 0;
199 
200  out_free:
201 	kfree(ap);
202  out:
203 	return err;
204 }
205 
206 /*
207  * Called when the tty is put into another line discipline
208  * or it hangs up.  We have to wait for any cpu currently
209  * executing in any of the other ppp_asynctty_* routines to
210  * finish before we can call ppp_unregister_channel and free
211  * the asyncppp struct.  This routine must be called from
212  * process context, not interrupt or softirq context.
213  */
214 static void
215 ppp_asynctty_close(struct tty_struct *tty)
216 {
217 	struct asyncppp *ap;
218 
219 	write_lock_irq(&disc_data_lock);
220 	ap = tty->disc_data;
221 	tty->disc_data = NULL;
222 	write_unlock_irq(&disc_data_lock);
223 	if (!ap)
224 		return;
225 
226 	/*
227 	 * We have now ensured that nobody can start using ap from now
228 	 * on, but we have to wait for all existing users to finish.
229 	 * Note that ppp_unregister_channel ensures that no calls to
230 	 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
231 	 * by the time it returns.
232 	 */
233 	if (!refcount_dec_and_test(&ap->refcnt))
234 		wait_for_completion(&ap->dead);
235 	tasklet_kill(&ap->tsk);
236 
237 	ppp_unregister_channel(&ap->chan);
238 	kfree_skb(ap->rpkt);
239 	skb_queue_purge(&ap->rqueue);
240 	kfree_skb(ap->tpkt);
241 	kfree(ap);
242 }
243 
244 /*
245  * Called on tty hangup in process context.
246  *
247  * Wait for I/O to the driver to complete and unregister the PPP channel.
248  * This is already done by the close routine, so just call that.
249  */
250 static void ppp_asynctty_hangup(struct tty_struct *tty)
251 {
252 	ppp_asynctty_close(tty);
253 }
254 
255 /*
256  * Read does nothing - no data is ever available this way.
257  * Pppd reads and writes packets via /dev/ppp instead.
258  */
259 static ssize_t
260 ppp_asynctty_read(struct tty_struct *tty, struct file *file, u8 *buf,
261 		  size_t count, void **cookie, unsigned long offset)
262 {
263 	return -EAGAIN;
264 }
265 
266 /*
267  * Write on the tty does nothing; the packets all come in
268  * from the ppp generic stuff.
269  */
270 static ssize_t
271 ppp_asynctty_write(struct tty_struct *tty, struct file *file, const u8 *buf,
272 		   size_t count)
273 {
274 	return -EAGAIN;
275 }
276 
277 /*
278  * Called in process context only. May be re-entered by multiple
279  * threads calling ioctl.
280  */
281 
282 static int
283 ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
284 {
285 	struct asyncppp *ap = ap_get(tty);
286 	int err, val;
287 	int __user *p = (int __user *)arg;
288 
289 	if (!ap)
290 		return -ENXIO;
291 	err = -EFAULT;
292 	switch (cmd) {
293 	case PPPIOCGCHAN:
294 		err = -EFAULT;
295 		if (put_user(ppp_channel_index(&ap->chan), p))
296 			break;
297 		err = 0;
298 		break;
299 
300 	case PPPIOCGUNIT:
301 		err = -EFAULT;
302 		if (put_user(ppp_unit_number(&ap->chan), p))
303 			break;
304 		err = 0;
305 		break;
306 
307 	case TCFLSH:
308 		/* flush our buffers and the serial port's buffer */
309 		if (arg == TCIOFLUSH || arg == TCOFLUSH)
310 			ppp_async_flush_output(ap);
311 		err = n_tty_ioctl_helper(tty, cmd, arg);
312 		break;
313 
314 	case FIONREAD:
315 		val = 0;
316 		if (put_user(val, p))
317 			break;
318 		err = 0;
319 		break;
320 
321 	default:
322 		/* Try the various mode ioctls */
323 		err = tty_mode_ioctl(tty, cmd, arg);
324 	}
325 
326 	ap_put(ap);
327 	return err;
328 }
329 
330 /* May sleep, don't call from interrupt level or with interrupts disabled */
331 static void
332 ppp_asynctty_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
333 		     size_t count)
334 {
335 	struct asyncppp *ap = ap_get(tty);
336 	unsigned long flags;
337 
338 	if (!ap)
339 		return;
340 	spin_lock_irqsave(&ap->recv_lock, flags);
341 	ppp_async_input(ap, buf, cflags, count);
342 	spin_unlock_irqrestore(&ap->recv_lock, flags);
343 	if (!skb_queue_empty(&ap->rqueue))
344 		tasklet_schedule(&ap->tsk);
345 	ap_put(ap);
346 	tty_unthrottle(tty);
347 }
348 
349 static void
350 ppp_asynctty_wakeup(struct tty_struct *tty)
351 {
352 	struct asyncppp *ap = ap_get(tty);
353 
354 	clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
355 	if (!ap)
356 		return;
357 	set_bit(XMIT_WAKEUP, &ap->xmit_flags);
358 	tasklet_schedule(&ap->tsk);
359 	ap_put(ap);
360 }
361 
362 
363 static struct tty_ldisc_ops ppp_ldisc = {
364 	.owner  = THIS_MODULE,
365 	.num	= N_PPP,
366 	.name	= "ppp",
367 	.open	= ppp_asynctty_open,
368 	.close	= ppp_asynctty_close,
369 	.hangup	= ppp_asynctty_hangup,
370 	.read	= ppp_asynctty_read,
371 	.write	= ppp_asynctty_write,
372 	.ioctl	= ppp_asynctty_ioctl,
373 	.receive_buf = ppp_asynctty_receive,
374 	.write_wakeup = ppp_asynctty_wakeup,
375 };
376 
377 static int __init
378 ppp_async_init(void)
379 {
380 	int err;
381 
382 	err = tty_register_ldisc(&ppp_ldisc);
383 	if (err != 0)
384 		printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
385 		       err);
386 	return err;
387 }
388 
389 /*
390  * The following routines provide the PPP channel interface.
391  */
392 static int
393 ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
394 {
395 	struct asyncppp *ap = chan->private;
396 	void __user *argp = (void __user *)arg;
397 	int __user *p = argp;
398 	int err, val;
399 	u32 accm[8];
400 
401 	err = -EFAULT;
402 	switch (cmd) {
403 	case PPPIOCGFLAGS:
404 		val = ap->flags | ap->rbits;
405 		if (put_user(val, p))
406 			break;
407 		err = 0;
408 		break;
409 	case PPPIOCSFLAGS:
410 		if (get_user(val, p))
411 			break;
412 		ap->flags = val & ~SC_RCV_BITS;
413 		spin_lock_irq(&ap->recv_lock);
414 		ap->rbits = val & SC_RCV_BITS;
415 		spin_unlock_irq(&ap->recv_lock);
416 		err = 0;
417 		break;
418 
419 	case PPPIOCGASYNCMAP:
420 		if (put_user(ap->xaccm[0], (u32 __user *)argp))
421 			break;
422 		err = 0;
423 		break;
424 	case PPPIOCSASYNCMAP:
425 		if (get_user(ap->xaccm[0], (u32 __user *)argp))
426 			break;
427 		err = 0;
428 		break;
429 
430 	case PPPIOCGRASYNCMAP:
431 		if (put_user(ap->raccm, (u32 __user *)argp))
432 			break;
433 		err = 0;
434 		break;
435 	case PPPIOCSRASYNCMAP:
436 		if (get_user(ap->raccm, (u32 __user *)argp))
437 			break;
438 		err = 0;
439 		break;
440 
441 	case PPPIOCGXASYNCMAP:
442 		if (copy_to_user(argp, ap->xaccm, sizeof(ap->xaccm)))
443 			break;
444 		err = 0;
445 		break;
446 	case PPPIOCSXASYNCMAP:
447 		if (copy_from_user(accm, argp, sizeof(accm)))
448 			break;
449 		accm[2] &= ~0x40000000U;	/* can't escape 0x5e */
450 		accm[3] |= 0x60000000U;		/* must escape 0x7d, 0x7e */
451 		memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
452 		err = 0;
453 		break;
454 
455 	case PPPIOCGMRU:
456 		if (put_user(ap->mru, p))
457 			break;
458 		err = 0;
459 		break;
460 	case PPPIOCSMRU:
461 		if (get_user(val, p))
462 			break;
463 		if (val > U16_MAX) {
464 			err = -EINVAL;
465 			break;
466 		}
467 		if (val < PPP_MRU)
468 			val = PPP_MRU;
469 		ap->mru = val;
470 		err = 0;
471 		break;
472 
473 	default:
474 		err = -ENOTTY;
475 	}
476 
477 	return err;
478 }
479 
480 /*
481  * This is called at softirq level to deliver received packets
482  * to the ppp_generic code, and to tell the ppp_generic code
483  * if we can accept more output now.
484  */
485 static void ppp_async_process(struct tasklet_struct *t)
486 {
487 	struct asyncppp *ap = from_tasklet(ap, t, tsk);
488 	struct sk_buff *skb;
489 
490 	/* process received packets */
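	/*
	 * skb->cb[0] was set from ap->state by process_input_packet(); a
	 * non-zero value (SC_PREV_ERROR) means the previous frame was
	 * dropped, so report the error before delivering this frame.
	 */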
491 	while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
492 		if (skb->cb[0])
493 			ppp_input_error(&ap->chan, 0);
494 		ppp_input(&ap->chan, skb);
495 	}
496 
497 	/* try to push more stuff out */
498 	if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
499 		ppp_output_wakeup(&ap->chan);
500 }
501 
502 /*
503  * Procedures for encapsulation and framing.
504  */
505 
506 /*
507  * Procedure to encode the data for async serial transmission.
508  * Does octet stuffing (escaping), puts the address/control bytes
509  * on if A/C compression is disabled, and does protocol compression.
510  * Assumes ap->tpkt != 0 on entry.
511  * Returns 1 if we finished the current frame, 0 otherwise.
512  */
513 
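/*
 * Ignoring escaping, a completed frame in obuf looks like:
 *
 *	FLAG  FF 03  proto  data...  FCS-lo  FCS-hi  FLAG
 *
 * The FF 03 address/control bytes are omitted when SC_COMP_AC has been
 * negotiated, the leading FLAG may be shared with the previous frame when
 * flag_time allows, and the high protocol byte is dropped (when zero)
 * under SC_COMP_PROT.  LCP frames are always sent fully escaped and
 * uncompressed.
 */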
514 #define PUT_BYTE(ap, buf, c, islcp)	do {		\
515 	if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
516 		*buf++ = PPP_ESCAPE;			\
517 		*buf++ = c ^ PPP_TRANS;			\
518 	} else						\
519 		*buf++ = c;				\
520 } while (0)
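/*
 * xaccm is a 256-bit map: character c must be escaped when bit (c & 0x1f)
 * of word (c >> 5) is set; for LCP frames every control character below
 * 0x20 is escaped unconditionally.  Escaping emits PPP_ESCAPE followed by
 * c ^ PPP_TRANS, so for example 0x7e (FLAG) goes out as 0x7d 0x5e and
 * 0x7d (ESCAPE) as 0x7d 0x5d.
 */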
521 
522 static int
523 ppp_async_encode(struct asyncppp *ap)
524 {
525 	int fcs, i, count, c, proto;
526 	unsigned char *buf, *buflim;
527 	unsigned char *data;
528 	int islcp;
529 
530 	buf = ap->obuf;
531 	ap->olim = buf;
532 	ap->optr = buf;
533 	i = ap->tpkt_pos;
534 	data = ap->tpkt->data;
535 	count = ap->tpkt->len;
536 	fcs = ap->tfcs;
537 	proto = get_unaligned_be16(data);
538 
539 	/*
540 	 * LCP packets with code values between 1 (configure-request)
541 	 * and 7 (code-reject) must be sent as though no options
542 	 * had been negotiated.
543 	 */
544 	islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
545 
546 	if (i == 0) {
547 		if (islcp)
548 			async_lcp_peek(ap, data, count, 0);
549 
550 		/*
551 		 * Start of a new packet - insert the leading FLAG
552 		 * character if necessary.
553 		 */
554 		if (islcp || flag_time == 0 ||
555 		    time_after_eq(jiffies, ap->last_xmit + flag_time))
556 			*buf++ = PPP_FLAG;
557 		ap->last_xmit = jiffies;
558 		fcs = PPP_INITFCS;
559 
560 		/*
561 		 * Put in the address/control bytes if necessary
562 		 */
563 		if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
564 			PUT_BYTE(ap, buf, 0xff, islcp);
565 			fcs = PPP_FCS(fcs, 0xff);
566 			PUT_BYTE(ap, buf, 0x03, islcp);
567 			fcs = PPP_FCS(fcs, 0x03);
568 		}
569 	}
570 
571 	/*
572 	 * Once we put in the last byte, we need to put in the FCS
573 	 * and closing flag, so make sure there are at least 7 bytes
574 	 * of free space in the output buffer.
575 	 */
576 	buflim = ap->obuf + OBUFSIZE - 6;
577 	while (i < count && buf < buflim) {
578 		c = data[i++];
579 		if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
580 			continue;	/* compress protocol field */
581 		fcs = PPP_FCS(fcs, c);
582 		PUT_BYTE(ap, buf, c, islcp);
583 	}
584 
585 	if (i < count) {
586 		/*
587 		 * Remember where we are up to in this packet.
588 		 */
589 		ap->olim = buf;
590 		ap->tpkt_pos = i;
591 		ap->tfcs = fcs;
592 		return 0;
593 	}
594 
595 	/*
596 	 * We have finished the packet.  Add the FCS and flag.
597 	 */
598 	fcs = ~fcs;
599 	c = fcs & 0xff;
600 	PUT_BYTE(ap, buf, c, islcp);
601 	c = (fcs >> 8) & 0xff;
602 	PUT_BYTE(ap, buf, c, islcp);
603 	*buf++ = PPP_FLAG;
604 	ap->olim = buf;
605 
606 	consume_skb(ap->tpkt);
607 	ap->tpkt = NULL;
608 	return 1;
609 }
610 
611 /*
612  * Transmit-side routines.
613  */
614 
615 /*
616  * Send a packet to the peer over an async tty line.
617  * Returns 1 iff the packet was accepted.
618  * If the packet was not accepted, we will call ppp_output_wakeup
619  * at some later time.
620  */
621 static int
622 ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
623 {
624 	struct asyncppp *ap = chan->private;
625 
626 	ppp_async_push(ap);
627 
628 	if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
629 		return 0;	/* already full */
630 	ap->tpkt = skb;
631 	ap->tpkt_pos = 0;
632 
633 	ppp_async_push(ap);
634 	return 1;
635 }
636 
637 /*
638  * Push as much data as possible out to the tty.
639  */
640 static int
641 ppp_async_push(struct asyncppp *ap)
642 {
643 	int avail, sent, done = 0;
644 	struct tty_struct *tty = ap->tty;
645 	int tty_stuffed = 0;
646 
647 	/*
648 	 * We can get called recursively here if the tty write
649 	 * function calls our wakeup function.  This can happen
650 	 * for example on a pty with both the master and slave
651 	 * set to PPP line discipline.
652 	 * We use the XMIT_BUSY bit to detect this and get out,
653 	 * leaving the XMIT_WAKEUP bit set to tell the other
654 	 * instance that it may now be able to write more.
655 	 */
656 	if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
657 		return 0;
658 	spin_lock_bh(&ap->xmit_lock);
659 	for (;;) {
660 		if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
661 			tty_stuffed = 0;
662 		if (!tty_stuffed && ap->optr < ap->olim) {
663 			avail = ap->olim - ap->optr;
664 			set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
665 			sent = tty->ops->write(tty, ap->optr, avail);
666 			if (sent < 0)
667 				goto flush;	/* error, e.g. loss of CD */
668 			ap->optr += sent;
669 			if (sent < avail)
670 				tty_stuffed = 1;
671 			continue;
672 		}
673 		if (ap->optr >= ap->olim && ap->tpkt) {
674 			if (ppp_async_encode(ap)) {
675 				/* finished processing ap->tpkt */
676 				clear_bit(XMIT_FULL, &ap->xmit_flags);
677 				done = 1;
678 			}
679 			continue;
680 		}
681 		/*
682 		 * We haven't made any progress this time around.
683 		 * Clear XMIT_BUSY to let other callers in, but
684 		 * after doing so we have to check if anyone set
685 		 * XMIT_WAKEUP since we last checked it.  If they
686 		 * did, we should try again to set XMIT_BUSY and go
687 		 * around again in case XMIT_BUSY was still set when
688 		 * the other caller tried.
689 		 */
690 		clear_bit(XMIT_BUSY, &ap->xmit_flags);
691 		/* any more work to do? if not, exit the loop */
692 		if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags) ||
693 		      (!tty_stuffed && ap->tpkt)))
694 			break;
695 		/* more work to do, see if we can do it now */
696 		if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
697 			break;
698 	}
699 	spin_unlock_bh(&ap->xmit_lock);
700 	return done;
701 
702 flush:
703 	clear_bit(XMIT_BUSY, &ap->xmit_flags);
704 	if (ap->tpkt) {
705 		kfree_skb(ap->tpkt);
706 		ap->tpkt = NULL;
707 		clear_bit(XMIT_FULL, &ap->xmit_flags);
708 		done = 1;
709 	}
710 	ap->optr = ap->olim;
711 	spin_unlock_bh(&ap->xmit_lock);
712 	return done;
713 }
714 
715 /*
716  * Flush output from our internal buffers.
717  * Called for the TCFLSH ioctl. Can be entered in parallel
718  * but this is covered by the xmit_lock.
719  */
720 static void
721 ppp_async_flush_output(struct asyncppp *ap)
722 {
723 	int done = 0;
724 
725 	spin_lock_bh(&ap->xmit_lock);
726 	ap->optr = ap->olim;
727 	if (ap->tpkt != NULL) {
728 		kfree_skb(ap->tpkt);
729 		ap->tpkt = NULL;
730 		clear_bit(XMIT_FULL, &ap->xmit_flags);
731 		done = 1;
732 	}
733 	spin_unlock_bh(&ap->xmit_lock);
734 	if (done)
735 		ppp_output_wakeup(&ap->chan);
736 }
737 
738 /*
739  * Receive-side routines.
740  */
741 
742 /* see how many ordinary chars there are at the start of buf */
743 static inline int
744 scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
745 {
746 	int i, c;
747 
748 	for (i = 0; i < count; ++i) {
749 		c = buf[i];
750 		if (c == PPP_ESCAPE || c == PPP_FLAG ||
751 		    (c < 0x20 && (ap->raccm & (1 << c)) != 0))
752 			break;
753 	}
754 	return i;
755 }
756 
757 /* called when a flag is seen - do end-of-packet processing */
758 static void
759 process_input_packet(struct asyncppp *ap)
760 {
761 	struct sk_buff *skb;
762 	unsigned char *p;
763 	unsigned int len, fcs;
764 
765 	skb = ap->rpkt;
766 	if (ap->state & (SC_TOSS | SC_ESCAPE))
767 		goto err;
768 
769 	if (skb == NULL)
770 		return;		/* 0-length packet */
771 
772 	/* check the FCS */
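	/*
	 * The sender appends the one's complement of its running CRC, so
	 * computing the CRC over the data plus the appended FCS of a good
	 * frame leaves the fixed residue PPP_GOODFCS (0xf0b8).
	 */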
773 	p = skb->data;
774 	len = skb->len;
775 	if (len < 3)
776 		goto err;	/* too short */
777 	fcs = PPP_INITFCS;
778 	for (; len > 0; --len)
779 		fcs = PPP_FCS(fcs, *p++);
780 	if (fcs != PPP_GOODFCS)
781 		goto err;	/* bad FCS */
782 	skb_trim(skb, skb->len - 2);
783 
784 	/* check for address/control and protocol compression */
785 	p = skb->data;
786 	if (p[0] == PPP_ALLSTATIONS) {
787 		/* chop off address/control */
788 		if (p[1] != PPP_UI || skb->len < 3)
789 			goto err;
790 		p = skb_pull(skb, 2);
791 	}
792 
793 	/* If the protocol field is not compressed, it may be an LCP packet */
794 	if (!(p[0] & 0x01)) {
795 		unsigned int proto;
796 
797 		if (skb->len < 2)
798 			goto err;
799 		proto = (p[0] << 8) + p[1];
800 		if (proto == PPP_LCP)
801 			async_lcp_peek(ap, p, skb->len, 1);
802 	}
803 
804 	/* queue the frame to be processed */
805 	skb->cb[0] = ap->state;
806 	skb_queue_tail(&ap->rqueue, skb);
807 	ap->rpkt = NULL;
808 	ap->state = 0;
809 	return;
810 
811  err:
812 	/* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
813 	ap->state = SC_PREV_ERROR;
814 	if (skb) {
815 		/* make skb appear as freshly allocated */
816 		skb_trim(skb, 0);
817 		skb_reserve(skb, - skb_headroom(skb));
818 	}
819 }
820 
821 /* Called when the tty driver has data for us. Runs in parallel with the
822    other ldisc functions but will not be re-entered */
823 
824 static void
825 ppp_async_input(struct asyncppp *ap, const u8 *buf, const u8 *flags, int count)
826 {
827 	struct sk_buff *skb;
828 	int c, i, j, n, s, f;
829 	unsigned char *sp;
830 
831 	/* update bits used for 8-bit cleanness detection */
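	/*
	 * SC_RCV_B7_1/SC_RCV_B7_0 record whether any byte arrived with bit 7
	 * set or clear.  ((c >> 4) ^ c) & 0xf folds the byte to a nibble of
	 * the same parity, and 0x6996 serves as a 16-entry parity table, so
	 * SC_RCV_ODDP or SC_RCV_EVNP is set per byte.  The accumulated bits
	 * are reported back through PPPIOCGFLAGS, letting userspace detect a
	 * link that is not 8-bit clean.
	 */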
832 	if (~ap->rbits & SC_RCV_BITS) {
833 		s = 0;
834 		for (i = 0; i < count; ++i) {
835 			c = buf[i];
836 			if (flags && flags[i] != 0)
837 				continue;
838 			s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
839 			c = ((c >> 4) ^ c) & 0xf;
840 			s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
841 		}
842 		ap->rbits |= s;
843 	}
844 
845 	while (count > 0) {
846 		/* scan through and see how many chars we can do in bulk */
847 		if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
848 			n = 1;
849 		else
850 			n = scan_ordinary(ap, buf, count);
851 
852 		f = 0;
853 		if (flags && (ap->state & SC_TOSS) == 0) {
854 			/* check the flags to see if any char had an error */
855 			for (j = 0; j < n; ++j)
856 				if ((f = flags[j]) != 0)
857 					break;
858 		}
859 		if (f != 0) {
860 			/* start tossing */
861 			ap->state |= SC_TOSS;
862 
863 		} else if (n > 0 && (ap->state & SC_TOSS) == 0) {
864 			/* stuff the chars in the skb */
865 			skb = ap->rpkt;
866 			if (!skb) {
867 				skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
868 				if (!skb)
869 					goto nomem;
870 				ap->rpkt = skb;
871 			}
872 			if (skb->len == 0) {
873 				/* Try to get the payload 4-byte aligned.
874 				 * This should match the
875 				 * PPP_ALLSTATIONS/PPP_UI/compressed tests in
876 				 * process_input_packet, but we do not have
877 				 * enough chars here to test buf[1] and buf[2].
878 				 */
879 				if (buf[0] != PPP_ALLSTATIONS)
880 					skb_reserve(skb, 2 + (buf[0] & 1));
881 			}
882 			if (n > skb_tailroom(skb)) {
883 				/* packet overflowed MRU */
884 				ap->state |= SC_TOSS;
885 			} else {
886 				sp = skb_put_data(skb, buf, n);
887 				if (ap->state & SC_ESCAPE) {
888 					sp[0] ^= PPP_TRANS;
889 					ap->state &= ~SC_ESCAPE;
890 				}
891 			}
892 		}
893 
894 		if (n >= count)
895 			break;
896 
897 		c = buf[n];
898 		if (flags != NULL && flags[n] != 0) {
899 			ap->state |= SC_TOSS;
900 		} else if (c == PPP_FLAG) {
901 			process_input_packet(ap);
902 		} else if (c == PPP_ESCAPE) {
903 			ap->state |= SC_ESCAPE;
904 		} else if (I_IXON(ap->tty)) {
905 			if (c == START_CHAR(ap->tty))
906 				start_tty(ap->tty);
907 			else if (c == STOP_CHAR(ap->tty))
908 				stop_tty(ap->tty);
909 		}
910 		/* otherwise it's a char in the recv ACCM */
911 		++n;
912 
913 		buf += n;
914 		if (flags)
915 			flags += n;
916 		count -= n;
917 	}
918 	return;
919 
920  nomem:
921 	printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
922 	ap->state |= SC_TOSS;
923 }
924 
925 /*
926  * We look at LCP frames going past so that we can notice
927  * and react to the LCP configure-ack from the peer.
928  * In the situation where the peer has been sent a configure-ack
929  * already, LCP is up once it has sent its configure-ack
930  * so the immediately following packet can be sent with the
931  * configured LCP options.  This allows us to process the following
932  * packet correctly without pppd needing to respond quickly.
933  *
934  * We only respond to the received configure-ack if we have just
935  * sent a configure-request, and the configure-ack contains the
936  * same data (this is checked using a 16-bit crc of the data).
937  */
938 #define CONFREQ		1	/* LCP code field values */
939 #define CONFACK		2
940 #define LCP_MRU		1	/* LCP option numbers */
941 #define LCP_ASYNCMAP	2
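/*
 * Layout assumed by async_lcp_peek() after the 2-byte protocol field:
 *
 *	code (1)  id (1)  length (2, big-endian)  options...
 *
 * where each option is  type (1)  len (1, covering these two bytes)  data.
 * Only the MRU and async-map options are acted on here.
 */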
942 
943 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
944 			   int len, int inbound)
945 {
946 	int dlen, fcs, i, code;
947 	u32 val;
948 
949 	data += 2;		/* skip protocol bytes */
950 	len -= 2;
951 	if (len < 4)		/* 4 = code, ID, length */
952 		return;
953 	code = data[0];
954 	if (code != CONFACK && code != CONFREQ)
955 		return;
956 	dlen = get_unaligned_be16(data + 2);
957 	if (len < dlen)
958 		return;		/* packet got truncated or length is bogus */
959 
960 	if (code == (inbound? CONFACK: CONFREQ)) {
961 		/*
962 		 * sent confreq or received confack:
963 		 * calculate the crc of the data from the ID field on.
964 		 */
965 		fcs = PPP_INITFCS;
966 		for (i = 1; i < dlen; ++i)
967 			fcs = PPP_FCS(fcs, data[i]);
968 
969 		if (!inbound) {
970 			/* outbound confreq - remember the crc for later */
971 			ap->lcp_fcs = fcs;
972 			return;
973 		}
974 
975 		/* received confack, check the crc */
976 		fcs ^= ap->lcp_fcs;
977 		ap->lcp_fcs = -1;
978 		if (fcs != 0)
979 			return;
980 	} else if (inbound)
981 		return;	/* not interested in received confreq */
982 
983 	/* process the options in the confack */
984 	data += 4;
985 	dlen -= 4;
986 	/* data[0] is the option type, data[1] is the option length */
987 	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
988 		switch (data[0]) {
989 		case LCP_MRU:
990 			val = get_unaligned_be16(data + 2);
991 			if (inbound)
992 				ap->mru = val;
993 			else
994 				ap->chan.mtu = val;
995 			break;
996 		case LCP_ASYNCMAP:
997 			val = get_unaligned_be32(data + 2);
998 			if (inbound)
999 				ap->raccm = val;
1000 			else
1001 				ap->xaccm[0] = val;
1002 			break;
1003 		}
1004 		dlen -= data[1];
1005 		data += data[1];
1006 	}
1007 }
1008 
1009 static void __exit ppp_async_cleanup(void)
1010 {
1011 	tty_unregister_ldisc(&ppp_ldisc);
1012 }
1013 
1014 module_init(ppp_async_init);
1015 module_exit(ppp_async_cleanup);
1016