1 /* 2 * Generic HDLC support routines for Linux 3 * Frame Relay support 4 * 5 * Copyright (C) 1999 - 2003 Krzysztof Halasa <khc@pm.waw.pl> 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of version 2 of the GNU General Public License 9 * as published by the Free Software Foundation. 10 * 11 12 Theory of PVC state 13 14 DCE mode: 15 16 (exist,new) -> 0,0 when "PVC create" or if "link unreliable" 17 0,x -> 1,1 if "link reliable" when sending FULL STATUS 18 1,1 -> 1,0 if received FULL STATUS ACK 19 20 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create" 21 -> 1 when "PVC up" and (exist,new) = 1,0 22 23 DTE mode: 24 (exist,new,active) = FULL STATUS if "link reliable" 25 = 0, 0, 0 if "link unreliable" 26 No LMI: 27 active = open and "link reliable" 28 exist = new = not used 29 30 */ 31 32 #include <linux/module.h> 33 #include <linux/kernel.h> 34 #include <linux/slab.h> 35 #include <linux/poll.h> 36 #include <linux/errno.h> 37 #include <linux/if_arp.h> 38 #include <linux/init.h> 39 #include <linux/skbuff.h> 40 #include <linux/pkt_sched.h> 41 #include <linux/random.h> 42 #include <linux/inetdevice.h> 43 #include <linux/lapb.h> 44 #include <linux/rtnetlink.h> 45 #include <linux/etherdevice.h> 46 #include <linux/hdlc.h> 47 48 #undef DEBUG_PKT 49 #undef DEBUG_ECN 50 #undef DEBUG_LINK 51 52 #define MAXLEN_LMISTAT 20 /* max size of status enquiry frame */ 53 54 #define PVC_STATE_NEW 0x01 55 #define PVC_STATE_ACTIVE 0x02 56 #define PVC_STATE_FECN 0x08 /* FECN condition */ 57 #define PVC_STATE_BECN 0x10 /* BECN condition */ 58 59 60 #define FR_UI 0x03 61 #define FR_PAD 0x00 62 63 #define NLPID_IP 0xCC 64 #define NLPID_IPV6 0x8E 65 #define NLPID_SNAP 0x80 66 #define NLPID_PAD 0x00 67 #define NLPID_Q933 0x08 68 69 70 #define LMI_DLCI 0 /* LMI DLCI */ 71 #define LMI_PROTO 0x08 72 #define LMI_CALLREF 0x00 /* Call Reference */ 73 #define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI lockshift */ 74 #define LMI_REPTYPE 1 /* report type */ 75 #define LMI_CCITT_REPTYPE 0x51 76 #define LMI_ALIVE 3 /* keep alive */ 77 #define LMI_CCITT_ALIVE 0x53 78 #define LMI_PVCSTAT 7 /* pvc status */ 79 #define LMI_CCITT_PVCSTAT 0x57 80 #define LMI_FULLREP 0 /* full report */ 81 #define LMI_INTEGRITY 1 /* link integrity report */ 82 #define LMI_SINGLE 2 /* single pvc report */ 83 #define LMI_STATUS_ENQUIRY 0x75 84 #define LMI_STATUS 0x7D /* reply */ 85 86 #define LMI_REPT_LEN 1 /* report type element length */ 87 #define LMI_INTEG_LEN 2 /* link integrity element length */ 88 89 #define LMI_LENGTH 13 /* standard LMI frame length */ 90 #define LMI_ANSI_LENGTH 14 91 92 93 typedef struct { 94 #if defined(__LITTLE_ENDIAN_BITFIELD) 95 unsigned ea1: 1; 96 unsigned cr: 1; 97 unsigned dlcih: 6; 98 99 unsigned ea2: 1; 100 unsigned de: 1; 101 unsigned becn: 1; 102 unsigned fecn: 1; 103 unsigned dlcil: 4; 104 #else 105 unsigned dlcih: 6; 106 unsigned cr: 1; 107 unsigned ea1: 1; 108 109 unsigned dlcil: 4; 110 unsigned fecn: 1; 111 unsigned becn: 1; 112 unsigned de: 1; 113 unsigned ea2: 1; 114 #endif 115 }__attribute__ ((packed)) fr_hdr; 116 117 118 static inline u16 q922_to_dlci(u8 *hdr) 119 { 120 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4); 121 } 122 123 124 125 static inline void dlci_to_q922(u8 *hdr, u16 dlci) 126 { 127 hdr[0] = (dlci >> 2) & 0xFC; 128 hdr[1] = ((dlci << 4) & 0xF0) | 0x01; 129 } 130 131 132 133 static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci) 134 { 135 pvc_device *pvc = hdlc->state.fr.first_pvc; 136 137 while (pvc) { 138 if (pvc->dlci == dlci) 139 return pvc; 140 if (pvc->dlci > dlci) 141 return NULL; /* the listed is sorted */ 142 pvc = pvc->next; 143 } 144 145 return NULL; 146 } 147 148 149 static inline pvc_device* add_pvc(struct net_device *dev, u16 dlci) 150 { 151 hdlc_device *hdlc = dev_to_hdlc(dev); 152 pvc_device *pvc, **pvc_p = &hdlc->state.fr.first_pvc; 153 154 while (*pvc_p) { 155 if ((*pvc_p)->dlci == dlci) 156 return *pvc_p; 157 if ((*pvc_p)->dlci > dlci) 158 break; /* the list is sorted */ 159 pvc_p = &(*pvc_p)->next; 160 } 161 162 pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC); 163 if (!pvc) 164 return NULL; 165 166 memset(pvc, 0, sizeof(pvc_device)); 167 pvc->dlci = dlci; 168 pvc->master = dev; 169 pvc->next = *pvc_p; /* Put it in the chain */ 170 *pvc_p = pvc; 171 return pvc; 172 } 173 174 175 static inline int pvc_is_used(pvc_device *pvc) 176 { 177 return pvc->main != NULL || pvc->ether != NULL; 178 } 179 180 181 static inline void pvc_carrier(int on, pvc_device *pvc) 182 { 183 if (on) { 184 if (pvc->main) 185 if (!netif_carrier_ok(pvc->main)) 186 netif_carrier_on(pvc->main); 187 if (pvc->ether) 188 if (!netif_carrier_ok(pvc->ether)) 189 netif_carrier_on(pvc->ether); 190 } else { 191 if (pvc->main) 192 if (netif_carrier_ok(pvc->main)) 193 netif_carrier_off(pvc->main); 194 if (pvc->ether) 195 if (netif_carrier_ok(pvc->ether)) 196 netif_carrier_off(pvc->ether); 197 } 198 } 199 200 201 static inline void delete_unused_pvcs(hdlc_device *hdlc) 202 { 203 pvc_device **pvc_p = &hdlc->state.fr.first_pvc; 204 205 while (*pvc_p) { 206 if (!pvc_is_used(*pvc_p)) { 207 pvc_device *pvc = *pvc_p; 208 *pvc_p = pvc->next; 209 kfree(pvc); 210 continue; 211 } 212 pvc_p = &(*pvc_p)->next; 213 } 214 } 215 216 217 static inline struct net_device** get_dev_p(pvc_device *pvc, int type) 218 { 219 if (type == ARPHRD_ETHER) 220 return &pvc->ether; 221 else 222 return &pvc->main; 223 } 224 225 226 static inline u16 status_to_dlci(u8 *status, int *active, int *new) 227 { 228 *new = (status[2] & 0x08) ? 1 : 0; 229 *active = (status[2] & 0x02) ? 1 : 0; 230 231 return ((status[0] & 0x3F) << 4) | ((status[1] & 0x78) >> 3); 232 } 233 234 235 static inline void dlci_to_status(u16 dlci, u8 *status, int active, int new) 236 { 237 status[0] = (dlci >> 4) & 0x3F; 238 status[1] = ((dlci << 3) & 0x78) | 0x80; 239 status[2] = 0x80; 240 241 if (new) 242 status[2] |= 0x08; 243 else if (active) 244 status[2] |= 0x02; 245 } 246 247 248 249 static int fr_hard_header(struct sk_buff **skb_p, u16 dlci) 250 { 251 u16 head_len; 252 struct sk_buff *skb = *skb_p; 253 254 switch (skb->protocol) { 255 case __constant_ntohs(ETH_P_IP): 256 head_len = 4; 257 skb_push(skb, head_len); 258 skb->data[3] = NLPID_IP; 259 break; 260 261 case __constant_ntohs(ETH_P_IPV6): 262 head_len = 4; 263 skb_push(skb, head_len); 264 skb->data[3] = NLPID_IPV6; 265 break; 266 267 case __constant_ntohs(LMI_PROTO): 268 head_len = 4; 269 skb_push(skb, head_len); 270 skb->data[3] = LMI_PROTO; 271 break; 272 273 case __constant_ntohs(ETH_P_802_3): 274 head_len = 10; 275 if (skb_headroom(skb) < head_len) { 276 struct sk_buff *skb2 = skb_realloc_headroom(skb, 277 head_len); 278 if (!skb2) 279 return -ENOBUFS; 280 dev_kfree_skb(skb); 281 skb = *skb_p = skb2; 282 } 283 skb_push(skb, head_len); 284 skb->data[3] = FR_PAD; 285 skb->data[4] = NLPID_SNAP; 286 skb->data[5] = FR_PAD; 287 skb->data[6] = 0x80; 288 skb->data[7] = 0xC2; 289 skb->data[8] = 0x00; 290 skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */ 291 break; 292 293 default: 294 head_len = 10; 295 skb_push(skb, head_len); 296 skb->data[3] = FR_PAD; 297 skb->data[4] = NLPID_SNAP; 298 skb->data[5] = FR_PAD; 299 skb->data[6] = FR_PAD; 300 skb->data[7] = FR_PAD; 301 *(u16*)(skb->data + 8) = skb->protocol; 302 } 303 304 dlci_to_q922(skb->data, dlci); 305 skb->data[2] = FR_UI; 306 return 0; 307 } 308 309 310 311 static int pvc_open(struct net_device *dev) 312 { 313 pvc_device *pvc = dev_to_pvc(dev); 314 315 if ((pvc->master->flags & IFF_UP) == 0) 316 return -EIO; /* Master must be UP in order to activate PVC */ 317 318 if (pvc->open_count++ == 0) { 319 hdlc_device *hdlc = dev_to_hdlc(pvc->master); 320 if (hdlc->state.fr.settings.lmi == LMI_NONE) 321 pvc->state.active = hdlc->carrier; 322 323 pvc_carrier(pvc->state.active, pvc); 324 hdlc->state.fr.dce_changed = 1; 325 } 326 return 0; 327 } 328 329 330 331 static int pvc_close(struct net_device *dev) 332 { 333 pvc_device *pvc = dev_to_pvc(dev); 334 335 if (--pvc->open_count == 0) { 336 hdlc_device *hdlc = dev_to_hdlc(pvc->master); 337 if (hdlc->state.fr.settings.lmi == LMI_NONE) 338 pvc->state.active = 0; 339 340 if (hdlc->state.fr.settings.dce) { 341 hdlc->state.fr.dce_changed = 1; 342 pvc->state.active = 0; 343 } 344 } 345 return 0; 346 } 347 348 349 350 int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 351 { 352 pvc_device *pvc = dev_to_pvc(dev); 353 fr_proto_pvc_info info; 354 355 if (ifr->ifr_settings.type == IF_GET_PROTO) { 356 if (dev->type == ARPHRD_ETHER) 357 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC; 358 else 359 ifr->ifr_settings.type = IF_PROTO_FR_PVC; 360 361 if (ifr->ifr_settings.size < sizeof(info)) { 362 /* data size wanted */ 363 ifr->ifr_settings.size = sizeof(info); 364 return -ENOBUFS; 365 } 366 367 info.dlci = pvc->dlci; 368 memcpy(info.master, pvc->master->name, IFNAMSIZ); 369 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info, 370 &info, sizeof(info))) 371 return -EFAULT; 372 return 0; 373 } 374 375 return -EINVAL; 376 } 377 378 379 static inline struct net_device_stats *pvc_get_stats(struct net_device *dev) 380 { 381 return netdev_priv(dev); 382 } 383 384 385 386 static int pvc_xmit(struct sk_buff *skb, struct net_device *dev) 387 { 388 pvc_device *pvc = dev_to_pvc(dev); 389 struct net_device_stats *stats = pvc_get_stats(dev); 390 391 if (pvc->state.active) { 392 if (dev->type == ARPHRD_ETHER) { 393 int pad = ETH_ZLEN - skb->len; 394 if (pad > 0) { /* Pad the frame with zeros */ 395 int len = skb->len; 396 if (skb_tailroom(skb) < pad) 397 if (pskb_expand_head(skb, 0, pad, 398 GFP_ATOMIC)) { 399 stats->tx_dropped++; 400 dev_kfree_skb(skb); 401 return 0; 402 } 403 skb_put(skb, pad); 404 memset(skb->data + len, 0, pad); 405 } 406 skb->protocol = __constant_htons(ETH_P_802_3); 407 } 408 if (!fr_hard_header(&skb, pvc->dlci)) { 409 stats->tx_bytes += skb->len; 410 stats->tx_packets++; 411 if (pvc->state.fecn) /* TX Congestion counter */ 412 stats->tx_compressed++; 413 skb->dev = pvc->master; 414 dev_queue_xmit(skb); 415 return 0; 416 } 417 } 418 419 stats->tx_dropped++; 420 dev_kfree_skb(skb); 421 return 0; 422 } 423 424 425 426 static int pvc_change_mtu(struct net_device *dev, int new_mtu) 427 { 428 if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU)) 429 return -EINVAL; 430 dev->mtu = new_mtu; 431 return 0; 432 } 433 434 435 436 static inline void fr_log_dlci_active(pvc_device *pvc) 437 { 438 printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n", 439 pvc->master->name, 440 pvc->dlci, 441 pvc->main ? pvc->main->name : "", 442 pvc->main && pvc->ether ? " " : "", 443 pvc->ether ? pvc->ether->name : "", 444 pvc->state.new ? " new" : "", 445 !pvc->state.exist ? "deleted" : 446 pvc->state.active ? "active" : "inactive"); 447 } 448 449 450 451 static inline u8 fr_lmi_nextseq(u8 x) 452 { 453 x++; 454 return x ? x : 1; 455 } 456 457 458 459 static void fr_lmi_send(struct net_device *dev, int fullrep) 460 { 461 hdlc_device *hdlc = dev_to_hdlc(dev); 462 struct sk_buff *skb; 463 pvc_device *pvc = hdlc->state.fr.first_pvc; 464 int len = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? LMI_ANSI_LENGTH 465 : LMI_LENGTH; 466 int stat_len = 3; 467 u8 *data; 468 int i = 0; 469 470 if (hdlc->state.fr.settings.dce && fullrep) { 471 len += hdlc->state.fr.dce_pvc_count * (2 + stat_len); 472 if (len > HDLC_MAX_MRU) { 473 printk(KERN_WARNING "%s: Too many PVCs while sending " 474 "LMI full report\n", dev->name); 475 return; 476 } 477 } 478 479 skb = dev_alloc_skb(len); 480 if (!skb) { 481 printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n", 482 dev->name); 483 return; 484 } 485 memset(skb->data, 0, len); 486 skb_reserve(skb, 4); 487 skb->protocol = __constant_htons(LMI_PROTO); 488 fr_hard_header(&skb, LMI_DLCI); 489 data = skb->tail; 490 data[i++] = LMI_CALLREF; 491 data[i++] = hdlc->state.fr.settings.dce 492 ? LMI_STATUS : LMI_STATUS_ENQUIRY; 493 if (hdlc->state.fr.settings.lmi == LMI_ANSI) 494 data[i++] = LMI_ANSI_LOCKSHIFT; 495 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT) 496 ? LMI_CCITT_REPTYPE : LMI_REPTYPE; 497 data[i++] = LMI_REPT_LEN; 498 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY; 499 500 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT) 501 ? LMI_CCITT_ALIVE : LMI_ALIVE; 502 data[i++] = LMI_INTEG_LEN; 503 data[i++] = hdlc->state.fr.txseq =fr_lmi_nextseq(hdlc->state.fr.txseq); 504 data[i++] = hdlc->state.fr.rxseq; 505 506 if (hdlc->state.fr.settings.dce && fullrep) { 507 while (pvc) { 508 data[i++] = (hdlc->state.fr.settings.lmi == LMI_CCITT) 509 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT; 510 data[i++] = stat_len; 511 512 /* LMI start/restart */ 513 if (hdlc->state.fr.reliable && !pvc->state.exist) { 514 pvc->state.exist = pvc->state.new = 1; 515 fr_log_dlci_active(pvc); 516 } 517 518 /* ifconfig PVC up */ 519 if (pvc->open_count && !pvc->state.active && 520 pvc->state.exist && !pvc->state.new) { 521 pvc_carrier(1, pvc); 522 pvc->state.active = 1; 523 fr_log_dlci_active(pvc); 524 } 525 526 dlci_to_status(pvc->dlci, data + i, 527 pvc->state.active, pvc->state.new); 528 i += stat_len; 529 pvc = pvc->next; 530 } 531 } 532 533 skb_put(skb, i); 534 skb->priority = TC_PRIO_CONTROL; 535 skb->dev = dev; 536 skb->nh.raw = skb->data; 537 538 dev_queue_xmit(skb); 539 } 540 541 542 543 static void fr_set_link_state(int reliable, struct net_device *dev) 544 { 545 hdlc_device *hdlc = dev_to_hdlc(dev); 546 pvc_device *pvc = hdlc->state.fr.first_pvc; 547 548 hdlc->state.fr.reliable = reliable; 549 if (reliable) { 550 if (!netif_carrier_ok(dev)) 551 netif_carrier_on(dev); 552 553 hdlc->state.fr.n391cnt = 0; /* Request full status */ 554 hdlc->state.fr.dce_changed = 1; 555 556 if (hdlc->state.fr.settings.lmi == LMI_NONE) { 557 while (pvc) { /* Activate all PVCs */ 558 pvc_carrier(1, pvc); 559 pvc->state.exist = pvc->state.active = 1; 560 pvc->state.new = 0; 561 pvc = pvc->next; 562 } 563 } 564 } else { 565 if (netif_carrier_ok(dev)) 566 netif_carrier_off(dev); 567 568 while (pvc) { /* Deactivate all PVCs */ 569 pvc_carrier(0, pvc); 570 pvc->state.exist = pvc->state.active = 0; 571 pvc->state.new = 0; 572 pvc = pvc->next; 573 } 574 } 575 } 576 577 578 579 static void fr_timer(unsigned long arg) 580 { 581 struct net_device *dev = (struct net_device *)arg; 582 hdlc_device *hdlc = dev_to_hdlc(dev); 583 int i, cnt = 0, reliable; 584 u32 list; 585 586 if (hdlc->state.fr.settings.dce) 587 reliable = hdlc->state.fr.request && 588 time_before(jiffies, hdlc->state.fr.last_poll + 589 hdlc->state.fr.settings.t392 * HZ); 590 else { 591 hdlc->state.fr.last_errors <<= 1; /* Shift the list */ 592 if (hdlc->state.fr.request) { 593 if (hdlc->state.fr.reliable) 594 printk(KERN_INFO "%s: No LMI status reply " 595 "received\n", dev->name); 596 hdlc->state.fr.last_errors |= 1; 597 } 598 599 list = hdlc->state.fr.last_errors; 600 for (i = 0; i < hdlc->state.fr.settings.n393; i++, list >>= 1) 601 cnt += (list & 1); /* errors count */ 602 603 reliable = (cnt < hdlc->state.fr.settings.n392); 604 } 605 606 if (hdlc->state.fr.reliable != reliable) { 607 printk(KERN_INFO "%s: Link %sreliable\n", dev->name, 608 reliable ? "" : "un"); 609 fr_set_link_state(reliable, dev); 610 } 611 612 if (hdlc->state.fr.settings.dce) 613 hdlc->state.fr.timer.expires = jiffies + 614 hdlc->state.fr.settings.t392 * HZ; 615 else { 616 if (hdlc->state.fr.n391cnt) 617 hdlc->state.fr.n391cnt--; 618 619 fr_lmi_send(dev, hdlc->state.fr.n391cnt == 0); 620 621 hdlc->state.fr.last_poll = jiffies; 622 hdlc->state.fr.request = 1; 623 hdlc->state.fr.timer.expires = jiffies + 624 hdlc->state.fr.settings.t391 * HZ; 625 } 626 627 hdlc->state.fr.timer.function = fr_timer; 628 hdlc->state.fr.timer.data = arg; 629 add_timer(&hdlc->state.fr.timer); 630 } 631 632 633 634 static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb) 635 { 636 hdlc_device *hdlc = dev_to_hdlc(dev); 637 int stat_len; 638 pvc_device *pvc; 639 int reptype = -1, error, no_ram; 640 u8 rxseq, txseq; 641 int i; 642 643 if (skb->len < ((hdlc->state.fr.settings.lmi == LMI_ANSI) 644 ? LMI_ANSI_LENGTH : LMI_LENGTH)) { 645 printk(KERN_INFO "%s: Short LMI frame\n", dev->name); 646 return 1; 647 } 648 649 if (skb->data[5] != (!hdlc->state.fr.settings.dce ? 650 LMI_STATUS : LMI_STATUS_ENQUIRY)) { 651 printk(KERN_INFO "%s: LMI msgtype=%x, Not LMI status %s\n", 652 dev->name, skb->data[2], 653 hdlc->state.fr.settings.dce ? "enquiry" : "reply"); 654 return 1; 655 } 656 657 i = (hdlc->state.fr.settings.lmi == LMI_ANSI) ? 7 : 6; 658 659 if (skb->data[i] != 660 ((hdlc->state.fr.settings.lmi == LMI_CCITT) 661 ? LMI_CCITT_REPTYPE : LMI_REPTYPE)) { 662 printk(KERN_INFO "%s: Not a report type=%x\n", 663 dev->name, skb->data[i]); 664 return 1; 665 } 666 i++; 667 668 i++; /* Skip length field */ 669 670 reptype = skb->data[i++]; 671 672 if (skb->data[i]!= 673 ((hdlc->state.fr.settings.lmi == LMI_CCITT) 674 ? LMI_CCITT_ALIVE : LMI_ALIVE)) { 675 printk(KERN_INFO "%s: Unsupported status element=%x\n", 676 dev->name, skb->data[i]); 677 return 1; 678 } 679 i++; 680 681 i++; /* Skip length field */ 682 683 hdlc->state.fr.rxseq = skb->data[i++]; /* TX sequence from peer */ 684 rxseq = skb->data[i++]; /* Should confirm our sequence */ 685 686 txseq = hdlc->state.fr.txseq; 687 688 if (hdlc->state.fr.settings.dce) { 689 if (reptype != LMI_FULLREP && reptype != LMI_INTEGRITY) { 690 printk(KERN_INFO "%s: Unsupported report type=%x\n", 691 dev->name, reptype); 692 return 1; 693 } 694 hdlc->state.fr.last_poll = jiffies; 695 } 696 697 error = 0; 698 if (!hdlc->state.fr.reliable) 699 error = 1; 700 701 if (rxseq == 0 || rxseq != txseq) { 702 hdlc->state.fr.n391cnt = 0; /* Ask for full report next time */ 703 error = 1; 704 } 705 706 if (hdlc->state.fr.settings.dce) { 707 if (hdlc->state.fr.fullrep_sent && !error) { 708 /* Stop sending full report - the last one has been confirmed by DTE */ 709 hdlc->state.fr.fullrep_sent = 0; 710 pvc = hdlc->state.fr.first_pvc; 711 while (pvc) { 712 if (pvc->state.new) { 713 pvc->state.new = 0; 714 715 /* Tell DTE that new PVC is now active */ 716 hdlc->state.fr.dce_changed = 1; 717 } 718 pvc = pvc->next; 719 } 720 } 721 722 if (hdlc->state.fr.dce_changed) { 723 reptype = LMI_FULLREP; 724 hdlc->state.fr.fullrep_sent = 1; 725 hdlc->state.fr.dce_changed = 0; 726 } 727 728 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0); 729 return 0; 730 } 731 732 /* DTE */ 733 734 hdlc->state.fr.request = 0; /* got response, no request pending */ 735 736 if (error) 737 return 0; 738 739 if (reptype != LMI_FULLREP) 740 return 0; 741 742 stat_len = 3; 743 pvc = hdlc->state.fr.first_pvc; 744 745 while (pvc) { 746 pvc->state.deleted = 1; 747 pvc = pvc->next; 748 } 749 750 no_ram = 0; 751 while (skb->len >= i + 2 + stat_len) { 752 u16 dlci; 753 unsigned int active, new; 754 755 if (skb->data[i] != ((hdlc->state.fr.settings.lmi == LMI_CCITT) 756 ? LMI_CCITT_PVCSTAT : LMI_PVCSTAT)) { 757 printk(KERN_WARNING "%s: Invalid PVCSTAT ID: %x\n", 758 dev->name, skb->data[i]); 759 return 1; 760 } 761 i++; 762 763 if (skb->data[i] != stat_len) { 764 printk(KERN_WARNING "%s: Invalid PVCSTAT length: %x\n", 765 dev->name, skb->data[i]); 766 return 1; 767 } 768 i++; 769 770 dlci = status_to_dlci(skb->data + i, &active, &new); 771 772 pvc = add_pvc(dev, dlci); 773 774 if (!pvc && !no_ram) { 775 printk(KERN_WARNING 776 "%s: Memory squeeze on fr_lmi_recv()\n", 777 dev->name); 778 no_ram = 1; 779 } 780 781 if (pvc) { 782 pvc->state.exist = 1; 783 pvc->state.deleted = 0; 784 if (active != pvc->state.active || 785 new != pvc->state.new || 786 !pvc->state.exist) { 787 pvc->state.new = new; 788 pvc->state.active = active; 789 pvc_carrier(active, pvc); 790 fr_log_dlci_active(pvc); 791 } 792 } 793 794 i += stat_len; 795 } 796 797 pvc = hdlc->state.fr.first_pvc; 798 799 while (pvc) { 800 if (pvc->state.deleted && pvc->state.exist) { 801 pvc_carrier(0, pvc); 802 pvc->state.active = pvc->state.new = 0; 803 pvc->state.exist = 0; 804 fr_log_dlci_active(pvc); 805 } 806 pvc = pvc->next; 807 } 808 809 /* Next full report after N391 polls */ 810 hdlc->state.fr.n391cnt = hdlc->state.fr.settings.n391; 811 812 return 0; 813 } 814 815 816 817 static int fr_rx(struct sk_buff *skb) 818 { 819 struct net_device *ndev = skb->dev; 820 hdlc_device *hdlc = dev_to_hdlc(ndev); 821 fr_hdr *fh = (fr_hdr*)skb->data; 822 u8 *data = skb->data; 823 u16 dlci; 824 pvc_device *pvc; 825 struct net_device *dev = NULL; 826 827 if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI) 828 goto rx_error; 829 830 dlci = q922_to_dlci(skb->data); 831 832 if (dlci == LMI_DLCI) { 833 if (hdlc->state.fr.settings.lmi == LMI_NONE) 834 goto rx_error; /* LMI packet with no LMI? */ 835 836 if (data[3] == LMI_PROTO) { 837 if (fr_lmi_recv(ndev, skb)) 838 goto rx_error; 839 else { 840 dev_kfree_skb_any(skb); 841 return NET_RX_SUCCESS; 842 } 843 } 844 845 printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n", 846 ndev->name); 847 goto rx_error; 848 } 849 850 pvc = find_pvc(hdlc, dlci); 851 if (!pvc) { 852 #ifdef DEBUG_PKT 853 printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n", 854 ndev->name, dlci); 855 #endif 856 dev_kfree_skb_any(skb); 857 return NET_RX_DROP; 858 } 859 860 if (pvc->state.fecn != fh->fecn) { 861 #ifdef DEBUG_ECN 862 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", ndev->name, 863 dlci, fh->fecn ? "N" : "FF"); 864 #endif 865 pvc->state.fecn ^= 1; 866 } 867 868 if (pvc->state.becn != fh->becn) { 869 #ifdef DEBUG_ECN 870 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", ndev->name, 871 dlci, fh->becn ? "N" : "FF"); 872 #endif 873 pvc->state.becn ^= 1; 874 } 875 876 877 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 878 hdlc->stats.rx_dropped++; 879 return NET_RX_DROP; 880 } 881 882 if (data[3] == NLPID_IP) { 883 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 884 dev = pvc->main; 885 skb->protocol = htons(ETH_P_IP); 886 887 } else if (data[3] == NLPID_IPV6) { 888 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */ 889 dev = pvc->main; 890 skb->protocol = htons(ETH_P_IPV6); 891 892 } else if (skb->len > 10 && data[3] == FR_PAD && 893 data[4] == NLPID_SNAP && data[5] == FR_PAD) { 894 u16 oui = ntohs(*(u16*)(data + 6)); 895 u16 pid = ntohs(*(u16*)(data + 8)); 896 skb_pull(skb, 10); 897 898 switch ((((u32)oui) << 16) | pid) { 899 case ETH_P_ARP: /* routed frame with SNAP */ 900 case ETH_P_IPX: 901 case ETH_P_IP: /* a long variant */ 902 case ETH_P_IPV6: 903 dev = pvc->main; 904 skb->protocol = htons(pid); 905 break; 906 907 case 0x80C20007: /* bridged Ethernet frame */ 908 if ((dev = pvc->ether) != NULL) 909 skb->protocol = eth_type_trans(skb, dev); 910 break; 911 912 default: 913 printk(KERN_INFO "%s: Unsupported protocol, OUI=%x " 914 "PID=%x\n", ndev->name, oui, pid); 915 dev_kfree_skb_any(skb); 916 return NET_RX_DROP; 917 } 918 } else { 919 printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x " 920 "length = %i\n", ndev->name, data[3], skb->len); 921 dev_kfree_skb_any(skb); 922 return NET_RX_DROP; 923 } 924 925 if (dev) { 926 struct net_device_stats *stats = pvc_get_stats(dev); 927 stats->rx_packets++; /* PVC traffic */ 928 stats->rx_bytes += skb->len; 929 if (pvc->state.becn) 930 stats->rx_compressed++; 931 skb->dev = dev; 932 netif_rx(skb); 933 return NET_RX_SUCCESS; 934 } else { 935 dev_kfree_skb_any(skb); 936 return NET_RX_DROP; 937 } 938 939 rx_error: 940 hdlc->stats.rx_errors++; /* Mark error */ 941 dev_kfree_skb_any(skb); 942 return NET_RX_DROP; 943 } 944 945 946 947 static void fr_start(struct net_device *dev) 948 { 949 hdlc_device *hdlc = dev_to_hdlc(dev); 950 #ifdef DEBUG_LINK 951 printk(KERN_DEBUG "fr_start\n"); 952 #endif 953 if (hdlc->state.fr.settings.lmi != LMI_NONE) { 954 hdlc->state.fr.reliable = 0; 955 hdlc->state.fr.dce_changed = 1; 956 hdlc->state.fr.request = 0; 957 hdlc->state.fr.fullrep_sent = 0; 958 hdlc->state.fr.last_errors = 0xFFFFFFFF; 959 hdlc->state.fr.n391cnt = 0; 960 hdlc->state.fr.txseq = hdlc->state.fr.rxseq = 0; 961 962 init_timer(&hdlc->state.fr.timer); 963 /* First poll after 1 s */ 964 hdlc->state.fr.timer.expires = jiffies + HZ; 965 hdlc->state.fr.timer.function = fr_timer; 966 hdlc->state.fr.timer.data = (unsigned long)dev; 967 add_timer(&hdlc->state.fr.timer); 968 } else 969 fr_set_link_state(1, dev); 970 } 971 972 973 974 static void fr_stop(struct net_device *dev) 975 { 976 hdlc_device *hdlc = dev_to_hdlc(dev); 977 #ifdef DEBUG_LINK 978 printk(KERN_DEBUG "fr_stop\n"); 979 #endif 980 if (hdlc->state.fr.settings.lmi != LMI_NONE) 981 del_timer_sync(&hdlc->state.fr.timer); 982 fr_set_link_state(0, dev); 983 } 984 985 986 987 static void fr_close(struct net_device *dev) 988 { 989 hdlc_device *hdlc = dev_to_hdlc(dev); 990 pvc_device *pvc = hdlc->state.fr.first_pvc; 991 992 while (pvc) { /* Shutdown all PVCs for this FRAD */ 993 if (pvc->main) 994 dev_close(pvc->main); 995 if (pvc->ether) 996 dev_close(pvc->ether); 997 pvc = pvc->next; 998 } 999 } 1000 1001 static void dlci_setup(struct net_device *dev) 1002 { 1003 dev->type = ARPHRD_DLCI; 1004 dev->flags = IFF_POINTOPOINT; 1005 dev->hard_header_len = 10; 1006 dev->addr_len = 2; 1007 } 1008 1009 static int fr_add_pvc(struct net_device *master, unsigned int dlci, int type) 1010 { 1011 hdlc_device *hdlc = dev_to_hdlc(master); 1012 pvc_device *pvc = NULL; 1013 struct net_device *dev; 1014 int result, used; 1015 char * prefix = "pvc%d"; 1016 1017 if (type == ARPHRD_ETHER) 1018 prefix = "pvceth%d"; 1019 1020 if ((pvc = add_pvc(master, dlci)) == NULL) { 1021 printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n", 1022 master->name); 1023 return -ENOBUFS; 1024 } 1025 1026 if (*get_dev_p(pvc, type)) 1027 return -EEXIST; 1028 1029 used = pvc_is_used(pvc); 1030 1031 if (type == ARPHRD_ETHER) 1032 dev = alloc_netdev(sizeof(struct net_device_stats), 1033 "pvceth%d", ether_setup); 1034 else 1035 dev = alloc_netdev(sizeof(struct net_device_stats), 1036 "pvc%d", dlci_setup); 1037 1038 if (!dev) { 1039 printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n", 1040 master->name); 1041 delete_unused_pvcs(hdlc); 1042 return -ENOBUFS; 1043 } 1044 1045 if (type == ARPHRD_ETHER) { 1046 memcpy(dev->dev_addr, "\x00\x01", 2); 1047 get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2); 1048 } else { 1049 *(u16*)dev->dev_addr = htons(dlci); 1050 dlci_to_q922(dev->broadcast, dlci); 1051 } 1052 dev->hard_start_xmit = pvc_xmit; 1053 dev->get_stats = pvc_get_stats; 1054 dev->open = pvc_open; 1055 dev->stop = pvc_close; 1056 dev->do_ioctl = pvc_ioctl; 1057 dev->change_mtu = pvc_change_mtu; 1058 dev->mtu = HDLC_MAX_MTU; 1059 dev->tx_queue_len = 0; 1060 dev->priv = pvc; 1061 1062 result = dev_alloc_name(dev, dev->name); 1063 if (result < 0) { 1064 free_netdev(dev); 1065 delete_unused_pvcs(hdlc); 1066 return result; 1067 } 1068 1069 if (register_netdevice(dev) != 0) { 1070 free_netdev(dev); 1071 delete_unused_pvcs(hdlc); 1072 return -EIO; 1073 } 1074 1075 dev->destructor = free_netdev; 1076 *get_dev_p(pvc, type) = dev; 1077 if (!used) { 1078 hdlc->state.fr.dce_changed = 1; 1079 hdlc->state.fr.dce_pvc_count++; 1080 } 1081 return 0; 1082 } 1083 1084 1085 1086 static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type) 1087 { 1088 pvc_device *pvc; 1089 struct net_device *dev; 1090 1091 if ((pvc = find_pvc(hdlc, dlci)) == NULL) 1092 return -ENOENT; 1093 1094 if ((dev = *get_dev_p(pvc, type)) == NULL) 1095 return -ENOENT; 1096 1097 if (dev->flags & IFF_UP) 1098 return -EBUSY; /* PVC in use */ 1099 1100 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */ 1101 *get_dev_p(pvc, type) = NULL; 1102 1103 if (!pvc_is_used(pvc)) { 1104 hdlc->state.fr.dce_pvc_count--; 1105 hdlc->state.fr.dce_changed = 1; 1106 } 1107 delete_unused_pvcs(hdlc); 1108 return 0; 1109 } 1110 1111 1112 1113 static void fr_destroy(hdlc_device *hdlc) 1114 { 1115 pvc_device *pvc; 1116 1117 pvc = hdlc->state.fr.first_pvc; 1118 hdlc->state.fr.first_pvc = NULL; /* All PVCs destroyed */ 1119 hdlc->state.fr.dce_pvc_count = 0; 1120 hdlc->state.fr.dce_changed = 1; 1121 1122 while (pvc) { 1123 pvc_device *next = pvc->next; 1124 /* destructors will free_netdev() main and ether */ 1125 if (pvc->main) 1126 unregister_netdevice(pvc->main); 1127 1128 if (pvc->ether) 1129 unregister_netdevice(pvc->ether); 1130 1131 kfree(pvc); 1132 pvc = next; 1133 } 1134 } 1135 1136 1137 1138 int hdlc_fr_ioctl(struct net_device *dev, struct ifreq *ifr) 1139 { 1140 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr; 1141 const size_t size = sizeof(fr_proto); 1142 fr_proto new_settings; 1143 hdlc_device *hdlc = dev_to_hdlc(dev); 1144 fr_proto_pvc pvc; 1145 int result; 1146 1147 switch (ifr->ifr_settings.type) { 1148 case IF_GET_PROTO: 1149 ifr->ifr_settings.type = IF_PROTO_FR; 1150 if (ifr->ifr_settings.size < size) { 1151 ifr->ifr_settings.size = size; /* data size wanted */ 1152 return -ENOBUFS; 1153 } 1154 if (copy_to_user(fr_s, &hdlc->state.fr.settings, size)) 1155 return -EFAULT; 1156 return 0; 1157 1158 case IF_PROTO_FR: 1159 if(!capable(CAP_NET_ADMIN)) 1160 return -EPERM; 1161 1162 if(dev->flags & IFF_UP) 1163 return -EBUSY; 1164 1165 if (copy_from_user(&new_settings, fr_s, size)) 1166 return -EFAULT; 1167 1168 if (new_settings.lmi == LMI_DEFAULT) 1169 new_settings.lmi = LMI_ANSI; 1170 1171 if ((new_settings.lmi != LMI_NONE && 1172 new_settings.lmi != LMI_ANSI && 1173 new_settings.lmi != LMI_CCITT) || 1174 new_settings.t391 < 1 || 1175 new_settings.t392 < 2 || 1176 new_settings.n391 < 1 || 1177 new_settings.n392 < 1 || 1178 new_settings.n393 < new_settings.n392 || 1179 new_settings.n393 > 32 || 1180 (new_settings.dce != 0 && 1181 new_settings.dce != 1)) 1182 return -EINVAL; 1183 1184 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); 1185 if (result) 1186 return result; 1187 1188 if (hdlc->proto.id != IF_PROTO_FR) { 1189 hdlc_proto_detach(hdlc); 1190 hdlc->state.fr.first_pvc = NULL; 1191 hdlc->state.fr.dce_pvc_count = 0; 1192 } 1193 memcpy(&hdlc->state.fr.settings, &new_settings, size); 1194 memset(&hdlc->proto, 0, sizeof(hdlc->proto)); 1195 1196 hdlc->proto.close = fr_close; 1197 hdlc->proto.start = fr_start; 1198 hdlc->proto.stop = fr_stop; 1199 hdlc->proto.detach = fr_destroy; 1200 hdlc->proto.netif_rx = fr_rx; 1201 hdlc->proto.id = IF_PROTO_FR; 1202 dev->hard_start_xmit = hdlc->xmit; 1203 dev->hard_header = NULL; 1204 dev->type = ARPHRD_FRAD; 1205 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1206 dev->addr_len = 0; 1207 return 0; 1208 1209 case IF_PROTO_FR_ADD_PVC: 1210 case IF_PROTO_FR_DEL_PVC: 1211 case IF_PROTO_FR_ADD_ETH_PVC: 1212 case IF_PROTO_FR_DEL_ETH_PVC: 1213 if(!capable(CAP_NET_ADMIN)) 1214 return -EPERM; 1215 1216 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc, 1217 sizeof(fr_proto_pvc))) 1218 return -EFAULT; 1219 1220 if (pvc.dlci <= 0 || pvc.dlci >= 1024) 1221 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */ 1222 1223 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC || 1224 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC) 1225 result = ARPHRD_ETHER; /* bridged Ethernet device */ 1226 else 1227 result = ARPHRD_DLCI; 1228 1229 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC || 1230 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC) 1231 return fr_add_pvc(dev, pvc.dlci, result); 1232 else 1233 return fr_del_pvc(hdlc, pvc.dlci, result); 1234 } 1235 1236 return -EINVAL; 1237 } 1238