/*
 * Generic HDLC support routines for Linux
 * Point-to-point protocol support
 *
 * Copyright (C) 1999 - 2008 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DEBUG_CP		0	/* also bytes# to dump */
#define DEBUG_STATE		0
#define DEBUG_HARD_HEADER	0

#define HDLC_ADDR_ALLSTATIONS	0xFF
#define HDLC_CTRL_UI		0x03

#define PID_LCP			0xC021
#define PID_IP			0x0021
#define PID_IPCP		0x8021
#define PID_IPV6		0x0057
#define PID_IPV6CP		0x8057

enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
      CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
      LCP_DISC_REQ, CP_CODES};
#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
	"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
	"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif

enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};

struct hdlc_header {
	u8 address;
	u8 control;
	__be16 protocol;
};

struct cp_header {
	u8 code;
	u8 id;
	__be16 len;
};


struct proto {
	struct net_device *dev;
	struct timer_list timer;
	unsigned long timeout;
	u16 pid;		/* protocol ID */
	u8 state;
	u8 cr_id;		/* ID of last Configuration-Request */
	u8 restart_counter;
};

struct ppp {
	struct proto protos[IDX_COUNT];
	spinlock_t lock;
	unsigned long last_pong;
	unsigned int req_timeout, cr_retries, term_retries;
	unsigned int keepalive_interval, keepalive_timeout;
	u8 seq;			/* local sequence number for requests */
	u8 echo_id;		/* ID of last Echo-Request (LCP) */
};

enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
      STATES, STATE_MASK = 0xF};
enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
      RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
      SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};

#if DEBUG_STATE
static const char *const state_names[STATES] = {
	"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
	"Opened"
};
static const char *const event_names[EVENTS] = {
	"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
	"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif

static struct sk_buff_head tx_queue; /* used when holding the spin lock */

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr);

static inline struct ppp* get_ppp(struct net_device *dev)
{
	return (struct ppp *)dev_to_hdlc(dev)->state;
}

static inline struct proto* get_proto(struct net_device *dev, u16 pid)
{
	struct ppp *ppp = get_ppp(dev);

	switch (pid) {
	case PID_LCP:
		return &ppp->protos[IDX_LCP];
	case PID_IPCP:
		return &ppp->protos[IDX_IPCP];
	case PID_IPV6CP:
		return &ppp->protos[IDX_IPV6CP];
	default:
		return NULL;
	}
}
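
/* PPP frames are carried in HDLC UI frames: a one-byte all-stations
   address (0xFF), a one-byte UI control field (0x03) and a big-endian
   16-bit PPP protocol ID (compare RFC 1662).  ppp_type_trans() below
   maps received frames to their ETH_P_* type, and ppp_hard_header()
   prepends the same header on transmit. */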

static inline const char* proto_name(u16 pid)
{
	switch (pid) {
	case PID_LCP:
		return "LCP";
	case PID_IPCP:
		return "IPCP";
	case PID_IPV6CP:
		return "IPV6CP";
	default:
		return NULL;
	}
}

static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct hdlc_header *data = (struct hdlc_header*)skb->data;

	if (skb->len < sizeof(struct hdlc_header))
		return htons(ETH_P_HDLC);
	if (data->address != HDLC_ADDR_ALLSTATIONS ||
	    data->control != HDLC_CTRL_UI)
		return htons(ETH_P_HDLC);

	switch (data->protocol) {
	case cpu_to_be16(PID_IP):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IP);

	case cpu_to_be16(PID_IPV6):
		skb_pull(skb, sizeof(struct hdlc_header));
		return htons(ETH_P_IPV6);

	default:
		return htons(ETH_P_HDLC);
	}
}


static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
			   u16 type, const void *daddr, const void *saddr,
			   unsigned int len)
{
	struct hdlc_header *data;
#if DEBUG_HARD_HEADER
	printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif

	skb_push(skb, sizeof(struct hdlc_header));
	data = (struct hdlc_header*)skb->data;

	data->address = HDLC_ADDR_ALLSTATIONS;
	data->control = HDLC_CTRL_UI;
	switch (type) {
	case ETH_P_IP:
		data->protocol = htons(PID_IP);
		break;
	case ETH_P_IPV6:
		data->protocol = htons(PID_IPV6);
		break;
	case PID_LCP:
	case PID_IPCP:
	case PID_IPV6CP:
		data->protocol = htons(type);
		break;
	default:		/* unknown protocol */
		data->protocol = 0;
	}
	return sizeof(struct hdlc_header);
}


static void ppp_tx_flush(void)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(&tx_queue)) != NULL)
		dev_queue_xmit(skb);
}

static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
		      u8 id, unsigned int len, const void *data)
{
	struct sk_buff *skb;
	struct cp_header *cp;
	unsigned int magic_len = 0;
	static u32 magic;

#if DEBUG_CP
	int i;
	char *ptr;
#endif

	if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
		magic_len = sizeof(magic);

	skb = dev_alloc_skb(sizeof(struct hdlc_header) +
			    sizeof(struct cp_header) + magic_len + len);
	if (!skb) {
		netdev_warn(dev, "out of memory in ppp_tx_cp()\n");
		return;
	}
	skb_reserve(skb, sizeof(struct hdlc_header));

	cp = skb_put(skb, sizeof(struct cp_header));
	cp->code = code;
	cp->id = id;
	cp->len = htons(sizeof(struct cp_header) + magic_len + len);

	if (magic_len)
		skb_put_data(skb, &magic, magic_len);
	if (len)
		skb_put_data(skb, data, len);

#if DEBUG_CP
	BUG_ON(code >= CP_CODES);
	ptr = debug_buffer;
	*ptr = '\x0';
	for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
	       proto_name(pid), code_names[code], id, debug_buffer);
#endif

	ppp_hard_header(skb, dev, pid, NULL, NULL, 0);

	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);
	skb_queue_tail(&tx_queue, skb);
}
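
/* Control packets built while ppp->lock is held are only queued on
   tx_queue by ppp_tx_cp(); the callers transmit them with ppp_tx_flush()
   after dropping the lock, so dev_queue_xmit() is never called with the
   lock held. */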

/* State transition table (compare STD-51)
     Events                                   Actions
   TO+  = Timeout with counter > 0            irc = Initialize-Restart-Count
   TO-  = Timeout with counter expired        zrc = Zero-Restart-Count

   RCR+ = Receive-Configure-Request (Good)    scr = Send-Configure-Request
   RCR- = Receive-Configure-Request (Bad)
   RCA  = Receive-Configure-Ack               sca = Send-Configure-Ack
   RCN  = Receive-Configure-Nak/Rej           scn = Send-Configure-Nak/Rej

   RTR  = Receive-Terminate-Request           str = Send-Terminate-Request
   RTA  = Receive-Terminate-Ack               sta = Send-Terminate-Ack

   RUC  = Receive-Unknown-Code                scj = Send-Code-Reject
   RXJ+ = Receive-Code-Reject (permitted)
       or Receive-Protocol-Reject
   RXJ- = Receive-Code-Reject (catastrophic)
       or Receive-Protocol-Reject
*/
static int cp_table[EVENTS][STATES] = {
	/*    CLOSED       STOPPED     STOPPING  REQ_SENT  ACK_RECV  ACK_SENT   OPENED
	        0             1           2         3         4         5          6   */
	{IRC|SCR|3,      INV        ,  INV ,   INV   ,  INV ,   INV   ,   INV   }, /* START */
	{   INV   ,       0         ,   0  ,    0    ,   0  ,    0    ,    0    }, /* STOP */
	{   INV   ,      INV        ,STR|2 ,  SCR|3  ,SCR|3 ,  SCR|5  ,   INV   }, /* TO+ */
	{   INV   ,      INV        ,   1  ,    1    ,   1  ,    1    ,   INV   }, /* TO- */
	{  STA|0  ,IRC|SCR|SCA|5    ,   2  ,  SCA|5  ,SCA|6 ,  SCA|5  ,SCR|SCA|5}, /* RCR+ */
	{  STA|0  ,IRC|SCR|SCN|3    ,   2  ,  SCN|3  ,SCN|4 ,  SCN|3  ,SCR|SCN|3}, /* RCR- */
	{  STA|0  ,     STA|1       ,   2  ,  IRC|4  ,SCR|3 ,    6    ,  SCR|3  }, /* RCA */
	{  STA|0  ,     STA|1       ,   2  ,IRC|SCR|3,SCR|3 ,IRC|SCR|5,  SCR|3  }, /* RCN */
	{  STA|0  ,     STA|1       ,STA|2 ,  STA|3  ,STA|3 ,  STA|3  ,ZRC|STA|2}, /* RTR */
	{    0    ,       1         ,   1  ,    3    ,   3  ,    5    ,  SCR|3  }, /* RTA */
	{  SCJ|0  ,     SCJ|1       ,SCJ|2 ,  SCJ|3  ,SCJ|4 ,  SCJ|5  ,  SCJ|6  }, /* RUC */
	{    0    ,       1         ,   2  ,    3    ,   3  ,    5    ,    6    }, /* RXJ+ */
	{    0    ,       1         ,   1  ,    1    ,   1  ,    1    ,IRC|STR|2}, /* RXJ- */
};
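
/* Each cp_table entry packs the next state into the low nibble
   (STATE_MASK) and the actions to perform into the higher bits, e.g.
   IRC|SCR|3 means: initialize the restart counter, send a
   Configure-Request and enter REQ_SENT.  ppp_cp_event() decodes and
   executes these action words. */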

/* SCA: RCR+ must supply id, len and data
   SCN: RCR- must supply code, id, len and data
   STA: RTR must supply id
   SCJ: RUC must supply CP packet len and data */
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
			 u8 id, unsigned int len, const void *data)
{
	int old_state, action;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto = get_proto(dev, pid);

	old_state = proto->state;
	BUG_ON(old_state >= STATES);
	BUG_ON(event >= EVENTS);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif

	action = cp_table[event][old_state];

	proto->state = action & STATE_MASK;
	if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
		mod_timer(&proto->timer, proto->timeout =
			  jiffies + ppp->req_timeout * HZ);
	if (action & ZRC)
		proto->restart_counter = 0;
	if (action & IRC)
		proto->restart_counter = (proto->state == STOPPING) ?
			ppp->term_retries : ppp->cr_retries;

	if (action & SCR)	/* send Configure-Request */
		ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
			  0, NULL);
	if (action & SCA)	/* send Configure-Ack */
		ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
	if (action & SCN)	/* send Configure-Nak/Reject */
		ppp_tx_cp(dev, pid, code, id, len, data);
	if (action & STR)	/* send Terminate-Request */
		ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
	if (action & STA)	/* send Terminate-Ack */
		ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
	if (action & SCJ)	/* send Code-Reject */
		ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);

	if (old_state != OPENED && proto->state == OPENED) {
		netdev_info(dev, "%s up\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_off(dev);
			ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
			ppp->last_pong = jiffies;
			mod_timer(&proto->timer, proto->timeout =
				  jiffies + ppp->keepalive_interval * HZ);
		}
	}
	if (old_state == OPENED && proto->state != OPENED) {
		netdev_info(dev, "%s down\n", proto_name(pid));
		if (pid == PID_LCP) {
			netif_dormant_on(dev);
			ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
		}
	}
	if (old_state != CLOSED && proto->state == CLOSED)
		del_timer(&proto->timer);

#if DEBUG_STATE
	printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
	       proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
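
/* Parse the options of a received Configure-Request.  Each option is a
   (type, length, data) triple whose length covers the whole option.  For
   LCP, MRU is always accepted, an ACCM other than the all-zero map we
   accept is NAKed, and an invalid magic number is rejected; anything
   else is collected into a Configure-Reject (compare RFC 1661). */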

static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
			    unsigned int req_len, const u8 *data)
{
	static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
	const u8 *opt;
	u8 *out;
	unsigned int len = req_len, nak_len = 0, rej_len = 0;

	if (!(out = kmalloc(len, GFP_ATOMIC))) {
		dev->stats.rx_dropped++;
		return;	/* out of memory, ignore CR packet */
	}

	for (opt = data; len; len -= opt[1], opt += opt[1]) {
		/* every option needs a length of at least 2 that fits in
		   the remaining data, otherwise the loop would not make
		   progress or would read past the packet */
		if (len < 2 || opt[1] < 2 || len < opt[1]) {
			dev->stats.rx_errors++;
			kfree(out);
			return; /* bad packet, drop silently */
		}

		if (pid == PID_LCP)
			switch (opt[0]) {
			case LCP_OPTION_MRU:
				continue; /* MRU always OK and > 1500 bytes? */

			case LCP_OPTION_ACCM: /* async control character map */
				if (!memcmp(opt, valid_accm,
					    sizeof(valid_accm)))
					continue;
				if (!rej_len) { /* NAK it */
					memcpy(out + nak_len, valid_accm,
					       sizeof(valid_accm));
					nak_len += sizeof(valid_accm);
					continue;
				}
				break;
			case LCP_OPTION_MAGIC:
				if (opt[1] != 6 || (!opt[2] && !opt[3] &&
						    !opt[4] && !opt[5]))
					break; /* reject invalid magic number */
				continue;
			}
		/* reject this option */
		memcpy(out + rej_len, opt, opt[1]);
		rej_len += opt[1];
	}

	if (rej_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
	else if (nak_len)
		ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
	else
		ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);

	kfree(out);
}
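
/* Receive handler for control frames.  IP and IPv6 data frames never get
   here - ppp_type_trans() has already reclassified them - so this only
   validates the HDLC and CP headers, answers the LCP-specific codes
   (Protocol-Reject, Echo, Discard) and feeds everything else into the
   state machine as events. */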

static int ppp_rx(struct sk_buff *skb)
{
	struct hdlc_header *hdr = (struct hdlc_header*)skb->data;
	struct net_device *dev = skb->dev;
	struct ppp *ppp = get_ppp(dev);
	struct proto *proto;
	struct cp_header *cp;
	unsigned long flags;
	unsigned int len;
	u16 pid;
#if DEBUG_CP
	int i;
	char *ptr;
#endif

	spin_lock_irqsave(&ppp->lock, flags);
	/* Check HDLC header */
	if (skb->len < sizeof(struct hdlc_header))
		goto rx_error;
	cp = skb_pull(skb, sizeof(struct hdlc_header));
	if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
	    hdr->control != HDLC_CTRL_UI)
		goto rx_error;

	pid = ntohs(hdr->protocol);
	proto = get_proto(dev, pid);
	if (!proto) {
		if (ppp->protos[IDX_LCP].state == OPENED)
			ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
				  ++ppp->seq, skb->len + 2, &hdr->protocol);
		goto rx_error;
	}

	len = ntohs(cp->len);
	if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
	    skb->len < len /* truncated packet? */)
		goto rx_error;
	skb_pull(skb, sizeof(struct cp_header));
	len -= sizeof(struct cp_header);

	/* HDLC and CP headers stripped from skb */
#if DEBUG_CP
	if (cp->code < CP_CODES)
		sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
			cp->id);
	else
		sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
	ptr = debug_buffer + strlen(debug_buffer);
	for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
		sprintf(ptr, " %02X", skb->data[i]);
		ptr += strlen(ptr);
	}
	printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
	       debug_buffer);
#endif

	/* LCP only */
	if (pid == PID_LCP)
		switch (cp->code) {
		case LCP_PROTO_REJ:
			pid = ntohs(*(__be16*)skb->data);
			if (pid == PID_LCP || pid == PID_IPCP ||
			    pid == PID_IPV6CP)
				ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
					     0, NULL);
			goto out;

		case LCP_ECHO_REQ: /* send Echo-Reply */
			if (len >= 4 && proto->state == OPENED)
				ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
					  cp->id, len - 4, skb->data + 4);
			goto out;

		case LCP_ECHO_REPLY:
			if (cp->id == ppp->echo_id)
				ppp->last_pong = jiffies;
			goto out;

		case LCP_DISC_REQ: /* discard */
			goto out;
		}

	/* LCP, IPCP and IPV6CP */
	switch (cp->code) {
	case CP_CONF_REQ:
		ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
		break;

	case CP_CONF_ACK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
		break;

	case CP_CONF_REJ:
	case CP_CONF_NAK:
		if (cp->id == proto->cr_id)
			ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
		break;

	case CP_TERM_REQ:
		ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
		break;

	case CP_TERM_ACK:
		ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
		break;

	case CP_CODE_REJ:
		ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
		break;

	default:
		len += sizeof(struct cp_header);
		if (len > dev->mtu)
			len = dev->mtu;
		ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
		break;
	}
	goto out;

rx_error:
	dev->stats.rx_errors++;
out:
	spin_unlock_irqrestore(&ppp->lock, flags);
	dev_kfree_skb_any(skb);
	ppp_tx_flush();
	return NET_RX_DROP;
}
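
/* Per-protocol timer.  While a negotiation is in progress it drives the
   restart counter (TO+ / TO- events, i.e. retransmit or give up); once
   LCP is opened it is reused as the keepalive timer that sends
   Echo-Requests and restarts the link when no Echo-Reply has arrived
   within keepalive_timeout seconds. */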

static void ppp_timer(struct timer_list *t)
{
	struct proto *proto = from_timer(proto, t, timer);
	struct ppp *ppp = get_ppp(proto->dev);
	unsigned long flags;

	spin_lock_irqsave(&ppp->lock, flags);
	switch (proto->state) {
	case STOPPING:
	case REQ_SENT:
	case ACK_RECV:
	case ACK_SENT:
		if (proto->restart_counter) {
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
			proto->restart_counter--;
		} else if (netif_carrier_ok(proto->dev))
			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
				     0, NULL);
		else
			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
				     0, NULL);
		break;

	case OPENED:
		if (proto->pid != PID_LCP)
			break;
		if (time_after(jiffies, ppp->last_pong +
			       ppp->keepalive_timeout * HZ)) {
			netdev_info(proto->dev, "Link down\n");
			ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
			ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
		} else {	/* send keep-alive packet */
			ppp->echo_id = ++ppp->seq;
			ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
				  ppp->echo_id, 0, NULL);
			proto->timer.expires = jiffies +
				ppp->keepalive_interval * HZ;
			add_timer(&proto->timer);
		}
		break;
	}
	spin_unlock_irqrestore(&ppp->lock, flags);
	ppp_tx_flush();
}


static void ppp_start(struct net_device *dev)
{
	struct ppp *ppp = get_ppp(dev);
	int i;

	for (i = 0; i < IDX_COUNT; i++) {
		struct proto *proto = &ppp->protos[i];
		proto->dev = dev;
		timer_setup(&proto->timer, ppp_timer, 0);
		proto->state = CLOSED;
	}
	ppp->protos[IDX_LCP].pid = PID_LCP;
	ppp->protos[IDX_IPCP].pid = PID_IPCP;
	ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;

	ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}

static void ppp_stop(struct net_device *dev)
{
	ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}

static void ppp_close(struct net_device *dev)
{
	ppp_tx_flush();
}

static struct hdlc_proto proto = {
	.start = ppp_start,
	.stop = ppp_stop,
	.close = ppp_close,
	.type_trans = ppp_type_trans,
	.ioctl = ppp_ioctl,
	.netif_rx = ppp_rx,
	.module = THIS_MODULE,
};

static const struct header_ops ppp_header_ops = {
	.create = ppp_hard_header,
};

static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct ppp *ppp;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto)
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_PPP;
		return 0; /* return protocol only, no settable parameters */

	case IF_PROTO_PPP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		/* no settable parameters */

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
		if (result)
			return result;

		ppp = get_ppp(dev);
		spin_lock_init(&ppp->lock);
		ppp->req_timeout = 2;
		ppp->cr_retries = 10;
		ppp->term_retries = 2;
		ppp->keepalive_interval = 10;
		ppp->keepalive_timeout = 60;

		dev->hard_header_len = sizeof(struct hdlc_header);
		dev->header_ops = &ppp_header_ops;
		dev->type = ARPHRD_PPP;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		netif_dormant_on(dev);
		return 0;
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	skb_queue_head_init(&tx_queue);
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");