// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * Point-to-point protocol support
 *
 * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEBUG_CP		0 /* 0 = off; otherwise also the number of CP payload bytes to dump */
#define DEBUG_STATE		0
#define DEBUG_HARD_HEADER	0

#define HDLC_ADDR_ALLSTATIONS	0xFF
#define HDLC_CTRL_UI		0x03

#define PID_LCP			0xC021
#define PID_IP			0x0021
#define PID_IPCP		0x8021
#define PID_IPV6		0x0057
#define PID_IPV6CP		0x8057

enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
      LCP_DISC_REQ, CP_CODES};
#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
	"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
	"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif

enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};

struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
};

struct cp_header {
	u8 code;
	u8 id;
	__be16 len;
};


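/* Per-control-protocol instance (one each for LCP, IPCP and IPV6CP):
 * its state machine state, retransmission timer and restart counter.
 */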
struct proto {
	struct net_device *dev;
	struct timer_list timer;
	unsigned long timeout;
	u16 pid;		/* protocol ID */
	u8 state;
	u8 cr_id;		/* ID of last Configuration-Request */
	u8 restart_counter;
};

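/* Per-device PPP state: the three control protocol instances plus the
 * negotiation and keepalive parameters, protected by 'lock'.
 */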
struct ppp {
	struct proto protos[IDX_COUNT];
	spinlock_t lock;
	unsigned long last_pong;
	unsigned int req_timeout, cr_retries, term_retries;
	unsigned int keepalive_interval, keepalive_timeout;
	u8 seq;			/* local sequence number for requests */
	u8 echo_id;		/* ID of last Echo-Request (LCP) */
};

enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
      STATES, STATE_MASK = 0xF};
enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
      RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
      SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};

#if DEBUG_STATE
static const char *const state_names[STATES] = {
	"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
	"Opened"
};
static const char *const event_names[EVENTS] = {
	"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
	"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif

/* Control packets queued while the per-device spin lock is held;
 * they are transmitted by ppp_tx_flush() once the lock is released.
 */
static struct sk_buff_head tx_queue;

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);

static inline struct ppp *get_ppp(struct net_device *dev)
{
	return (struct ppp *)dev_to_hdlc(dev)->state;
}

static inline struct proto *get_proto(struct net_device *dev, u16 pid)
{
	struct ppp *ppp = get_ppp(dev);

	switch (pid) {
	case PID_LCP:
		return &ppp->protos[IDX_LCP];
	case PID_IPCP:
		return &ppp->protos[IDX_IPCP];
	case PID_IPV6CP:
		return &ppp->protos[IDX_IPV6CP];
	default:
		return NULL;
	}
}

static inline const char *proto_name(u16 pid)
{
	switch (pid) {
	case PID_LCP:
		return "LCP";
	case PID_IPCP:
		return "IPCP";
	case PID_IPV6CP:
		return "IPV6CP";
	default:
		return NULL;
	}
}

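/* Map a received HDLC/PPP frame to a packet type: IP and IPv6 payloads
 * have their header pulled, everything else is passed up as ETH_P_HDLC.
 */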
static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header *)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return htons(ETH_P_HDLC);
	if (data->address != HDLC_ADDR_ALLSTATIONS ||
	    data->control != HDLC_CTRL_UI)
		return htons(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(PID_IP):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IP);

	case cpu_to_be16(PID_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IPV6);

	default:
		return htons(ETH_P_HDLC);
	}
}


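/* Build the 4-byte HDLC/PPP header (address, control, protocol) in front
 * of the payload; used for both data frames and control packets.
 */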
static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
			   u16 type, const void *daddr, const void *saddr,
			   unsigned int len)
{
	struct hdlc_header *data;
#if DEBUG_HARD_HEADER
	printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header *)skb->data;

	data->address = HDLC_ADDR_ALLSTATIONS;
	data->control = HDLC_CTRL_UI;
	switch (type) {
	case ETH_P_IP:
		data->protocol = htons(PID_IP);
		break;
	case ETH_P_IPV6:
		data->protocol = htons(PID_IPV6);
		break;
	case PID_LCP:
	case PID_IPCP:
	case PID_IPV6CP:
		data->protocol = htons(type);
		break;
	default:		/* unknown protocol */
		data->protocol = 0;
	}
	return sizeof(struct hdlc_header);
}


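/* Transmit any control packets queued by ppp_tx_cp(); called once the
 * per-device spin lock has been released.
 */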
static void ppp_tx_flush(void)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&tx_queue)) != NULL)
		dev_queue_xmit(skb);
}

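/* Build a control protocol packet (code, id, optional magic number and
 * data), prepend the HDLC/PPP header and queue it on tx_queue.
 */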
static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
		      u8 id, unsigned int len, const void *data)
{
	struct sk_buff *skb;
	struct cp_header *cp;
	unsigned int magic_len = 0;
	static u32 magic;

#if DEBUG_CP
	int i;
	char *ptr;
#endif

	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
		magic_len = sizeof(magic);

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cp_header) + magic_len + len);
	if (!skb) {
		netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
		return;
	}
	skb_reserve(skb, sizeof(struct hdlc_header));

	cp = skb_put(skb, sizeof(struct cp_header));
	cp->code = code;
	cp->id = id;
	cp->len = htons(sizeof(struct cp_header) + magic_len + len);

	if (magic_len)
		skb_put_data(skb, &magic, magic_len);
	if (len)
		skb_put_data(skb, data, len);

#if DEBUG_CP
	BUG_ON(code >= CP_CODES);
	ptr = debug_buffer;
	*ptr = '\x0';
	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
	       proto_name(pid), code_names[code], id, debug_buffer);
#endif

	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb->protocol = htons(ETH_P_HDLC);
	skb_reset_network_header(skb);
	skb_queue_tail(&tx_queue, skb);
}


/* State transition table (compare STD-51)
   Events                                   Actions
   TO+  = Timeout with counter > 0          irc = Initialize-Restart-Count
   TO-  = Timeout with counter expired      zrc = Zero-Restart-Count

   RCR+ = Receive-Configure-Request (Good)  scr = Send-Configure-Request
   RCR- = Receive-Configure-Request (Bad)
   RCA  = Receive-Configure-Ack             sca = Send-Configure-Ack
   RCN  = Receive-Configure-Nak/Rej         scn = Send-Configure-Nak/Rej

   RTR  = Receive-Terminate-Request         str = Send-Terminate-Request
   RTA  = Receive-Terminate-Ack             sta = Send-Terminate-Ack

   RUC  = Receive-Unknown-Code              scj = Send-Code-Reject
   RXJ+ = Receive-Code-Reject (permitted)
       or Receive-Protocol-Reject
   RXJ- = Receive-Code-Reject (catastrophic)
       or Receive-Protocol-Reject
*/
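/* Each table entry encodes the next state in its low nibble (STATE_MASK)
 * plus a bit mask of actions to perform; e.g. IRC|SCR|3 for START in
 * CLOSED means: initialize the restart counter, send a Configure-Request
 * and move to REQ_SENT. INV marks combinations that should never occur.
 */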
static int cp_table[EVENTS][STATES] = {
	/* CLOSED     STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
	     0           1         2       3       4      5          6    */
	{IRC|SCR|3,     INV     , INV ,   INV   , INV ,  INV    ,   INV   }, /* START */
	{   INV   ,      0      ,  0  ,    0    ,  0  ,   0     ,    0    }, /* STOP */
	{   INV   ,     INV     ,STR|2,  SCR|3  ,SCR|3,  SCR|5  ,   INV   }, /* TO+ */
	{   INV   ,     INV     ,  1  ,    1    ,  1  ,    1    ,   INV   }, /* TO- */
	{  STA|0  ,IRC|SCR|SCA|5,  2  ,  SCA|5  ,SCA|6,  SCA|5  ,SCR|SCA|5}, /* RCR+ */
	{  STA|0  ,IRC|SCR|SCN|3,  2  ,  SCN|3  ,SCN|4,  SCN|3  ,SCR|SCN|3}, /* RCR- */
	{  STA|0  ,    STA|1    ,  2  ,  IRC|4  ,SCR|3,    6    , SCR|3   }, /* RCA */
	{  STA|0  ,    STA|1    ,  2  ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3   }, /* RCN */
	{  STA|0  ,    STA|1    ,STA|2,  STA|3  ,STA|3,  STA|3  ,ZRC|STA|2}, /* RTR */
	{    0    ,      1      ,  1  ,    3    ,  3  ,    5    ,  SCR|3  }, /* RTA */
	{  SCJ|0  ,    SCJ|1    ,SCJ|2,  SCJ|3  ,SCJ|4,  SCJ|5  ,  SCJ|6  }, /* RUC */
	{    0    ,      1      ,  2  ,    3    ,  3  ,    5    ,    6    }, /* RXJ+ */
	{    0    ,      1      ,  1  ,    1    ,  1  ,    1    ,IRC|STR|2}, /* RXJ- */
};


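/* Run one event through the state machine of the given control protocol:
 * look up the table entry, switch to the new state and perform the
 * requested actions (timers, counters, packet transmission).
 */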
/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		netdev_info(dev, "%s up\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		netdev_info(dev, "%s down\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}
	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}


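/* Parse the options of a received Configure-Request and generate the
 * matching event: RCR+ (ACK everything) or RCR- (NAK a bad ACCM value,
 * REJect unknown or invalid options).
 */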
static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
			    unsigned int req_len, const u8 *data)
{
	static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
	const u8 *opt;
	u8 *out;
	unsigned int len = req_len, nak_len = 0, rej_len = 0;

	out = kmalloc(len, GFP_ATOMIC);
	if (!out) {
		dev->stats.rx_dropped++;
		return;	/* out of memory, ignore CR packet */
	}

	for (opt = data; len; len -= opt[1], opt += opt[1]) {
		if (len < 2 || opt[1] < 2 || len < opt[1])
			goto err_out;

		if (pid == PID_LCP)
			switch (opt[0]) {
			case LCP_OPTION_MRU:
				continue; /* MRU always OK and > 1500 bytes? */

			case LCP_OPTION_ACCM: /* async control character map */
				if (opt[1] < sizeof(valid_accm))
					goto err_out;
				if (!memcmp(opt, valid_accm,
					    sizeof(valid_accm)))
					continue;
				if (!rej_len) { /* NAK it */
					memcpy(out + nak_len, valid_accm,
					       sizeof(valid_accm));
					nak_len += sizeof(valid_accm);
					continue;
				}
				break;
			case LCP_OPTION_MAGIC:
				if (len < 6)
					goto err_out;
				if (opt[1] != 6 || (!opt[2] && !opt[3] &&
						    !opt[4] && !opt[5]))
					break; /* reject invalid magic number */
				continue;
			}
		/* reject this option */
		memcpy(out + rej_len, opt, opt[1]);
		rej_len += opt[1];
	}

	if (rej_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
	else if (nak_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
	else
		ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

	kfree(out);
	return;

err_out:
	dev->stats.rx_errors++;
	kfree(out);
}

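/* Receive handler for HDLC control frames: validates the HDLC and CP
 * headers, answers LCP Echo/Protocol-Reject packets directly and feeds
 * all other codes into the state machine. Always consumes the skb.
 */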
static int ppp_rx(struct sk_buff *skb)
{
	struct hdlc_header *hdr = (struct hdlc_header *)skb->data;
	struct net_device *dev = skb->dev;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto;
	struct cp_header *cp;
	unsigned long flags;
	unsigned int len;
	u16 pid;
#if DEBUG_CP
	int i;
	char *ptr;
#endif

	spin_lock_irqsave(&ppp->lock, flags);
	/* Check HDLC header */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;
	cp = skb_pull(skb, sizeof(struct hdlc_header));
	if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
	    hdr->control != HDLC_CTRL_UI)
		goto rx_error;

	pid = ntohs(hdr->protocol);
	proto = get_proto(dev, pid);
	if (!proto) {
		if (ppp->protos[IDX_LCP].state == OPENED)
			ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
				  ++ppp->seq, skb->len + 2, &hdr->protocol);
		goto rx_error;
	}

	len = ntohs(cp->len);
	if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
	    skb->len < len /* truncated packet? */)
		goto rx_error;
	skb_pull(skb, sizeof(struct cp_header));
	len -= sizeof(struct cp_header);

	/* HDLC and CP headers stripped from skb */
#if DEBUG_CP
	if (cp->code < CP_CODES)
		sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
			cp->id);
	else
		sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
	ptr = debug_buffer + strlen(debug_buffer);
	for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
	       debug_buffer);
#endif

	/* LCP only */
	if (pid == PID_LCP)
		switch (cp->code) {
		case LCP_PROTO_REJ:
			pid = ntohs(*(__be16 *)skb->data);
			if (pid == PID_LCP || pid == PID_IPCP ||
			    pid == PID_IPV6CP)
				ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
					     0, NULL);
			goto out;

		case LCP_ECHO_REQ: /* send Echo-Reply */
			if (len >= 4 && proto->state == OPENED)
				ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
					  cp->id, len - 4, skb->data + 4);
			goto out;

		case LCP_ECHO_REPLY:
			if (cp->id == ppp->echo_id)
				ppp->last_pong = jiffies;
			goto out;

		case LCP_DISC_REQ: /* discard */
			goto out;
		}

	/* LCP, IPCP and IPV6CP */
	switch (cp->code) {
	case CP_CONF_REQ:
		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
		break;

	case CP_CONF_ACK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
		break;

	case CP_CONF_REJ:
	case CP_CONF_NAK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
		break;

	case CP_TERM_REQ:
		ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
		break;

	case CP_TERM_ACK:
		ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
		break;

	case CP_CODE_REJ:
		ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
		break;

	default:
		len += sizeof(struct cp_header);
		if (len > dev->mtu)
			len = dev->mtu;
		ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
		break;
	}
	goto out;

rx_error:
	dev->stats.rx_errors++;
out:
	spin_unlock_irqrestore(&ppp->lock, flags);
	dev_kfree_skb_any(skb);
	ppp_tx_flush();
	return NET_RX_DROP;
}

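/* Per-protocol timer: retransmits Configure/Terminate-Requests while the
 * restart counter allows it, and once LCP is up sends periodic Echo-Request
 * keepalives, restarting LCP if no Echo-Reply arrives in time.
 */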
static void ppp_timer(struct timer_list *t)
{
	struct proto *proto = from_timer(proto, t, timer);
	struct ppp *ppp = get_ppp(proto->dev);
	unsigned long flags;

	spin_lock_irqsave(&ppp->lock, flags);
	/* mod_timer could be called after we entered this function but
	 * before we got the lock.
	 */
	if (timer_pending(&proto->timer)) {
		spin_unlock_irqrestore(&ppp->lock, flags);
		return;
	}
	switch (proto->state) {
	case STOPPING:
	case REQ_SENT:
	case ACK_RECV:
	case ACK_SENT:
		if (proto->restart_counter) {
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
			proto->restart_counter--;
		} else if (netif_carrier_ok(proto->dev))
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
		else
			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
				     0, NULL);
		break;

	case OPENED:
		if (proto->pid != PID_LCP)
			break;
		if (time_after(jiffies, ppp->last_pong +
			       ppp->keepalive_timeout * HZ)) {
			netdev_info(proto->dev, "Link down\n");
			ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
		} else {	/* send keep-alive packet */
			ppp->echo_id = ++ppp->seq;
			ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
				  ppp->echo_id, 0, NULL);
			proto->timer.expires = jiffies +
				ppp->keepalive_interval * HZ;
			add_timer(&proto->timer);
		}
		break;
	}
	spin_unlock_irqrestore(&ppp->lock, flags);
	ppp_tx_flush();
}


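/* Called when the HDLC device comes up: initialize the three protocol
 * instances and kick the LCP state machine with a START event.
 */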
static void ppp_start(struct net_device *dev)
{
	struct ppp *ppp = get_ppp(dev);
	int i;

	for (i = 0; i < IDX_COUNT; i++) {
		struct proto *proto = &ppp->protos[i];

		proto->dev = dev;
		timer_setup(&proto->timer, ppp_timer, 0);
		proto->state = CLOSED;
	}
	ppp->protos[IDX_LCP].pid = PID_LCP;
	ppp->protos[IDX_IPCP].pid = PID_IPCP;
	ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;

	ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}

static void ppp_stop(struct net_device *dev)
{
	ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}

static void ppp_close(struct net_device *dev)
{
	ppp_tx_flush();
}

static struct hdlc_proto proto = {
	.start		= ppp_start,
	.stop		= ppp_stop,
	.close		= ppp_close,
	.type_trans	= ppp_type_trans,
	.ioctl		= ppp_ioctl,
	.netif_rx	= ppp_rx,
	.module		= THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
	.create = ppp_hard_header,
};

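/* IF_PROTO_PPP attaches this protocol to an HDLC device and sets the
 * default timeouts and retry counts; IF_GET_PROTO only reports that PPP
 * is attached, since there are no settable parameters.
 */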
static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
		if (result)
			return result;

		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	skb_queue_head_init(&tx_queue);
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");