/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *

	    Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
	 0,x -> 1,1 if "link reliable" when sending FULL STATUS
	 1,1 -> 1,0 if received FULL STATUS ACK

 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
	     -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		    = 0, 0, 0 if "link unreliable"

 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI

*/

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09


#define LMI_CCITT_ANSI_DLCI	   0 /* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01 /* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03 /* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00 /* full report */
#define LMI_INTEGRITY		0x01 /* link integrity report */
#define LMI_SINGLE		0x02 /* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D /* reply */

#define LMI_REPT_LEN		   1 /* report type element length */
#define LMI_INTEG_LEN		   2 /* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14


struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __packed;


struct pvc_device {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface	*/
	struct pvc_device *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
};

struct frad_state {
	fr_proto settings;
	struct pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);


static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}


static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}


static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}


static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}


static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case cpu_to_be16(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case cpu_to_be16(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case cpu_to_be16(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case cpu_to_be16(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16 *)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}



static int pvc_open(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;	/* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}



static int pvc_close(struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}



static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pvc_device *pvc = dev->ml_priv;

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) { /* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						dev->stats.tx_dropped++;
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = cpu_to_be16(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			dev->stats.tx_bytes += skb->len;
			dev->stats.tx_packets++;
			if (pvc->state.fecn) /* TX Congestion counter */
				dev->stats.tx_compressed++;
			skb->dev = pvc->frad;
			dev_queue_xmit(skb);
			return NETDEV_TX_OK;
		}
	}

	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");
}



static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}


static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}



static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}


static void fr_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	state(hdlc)->timer.function = fr_timer;
	state(hdlc)->timer.data = arg;
	add_timer(&state(hdlc)->timer);
}


static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
			    skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
			    skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
				    skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
			    reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
			    skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
			    skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report - the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
				    skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
				    skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}


static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
			    dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}


	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
				    oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
			    data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		dev->stats.rx_packets++; /* PVC traffic */
		dev->stats.rx_bytes += skb->len;
		if (pvc->state.becn)
			dev->stats.rx_compressed++;
		skb->dev = dev;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

rx_error:
	frad->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}



static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		init_timer(&state(hdlc)->timer);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		state(hdlc)->timer.function = fr_timer;
		state(hdlc)->timer.data = (unsigned long)dev;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
	netif_keep_dst(dev);
}

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_change_mtu	= hdlc_change_mtu,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};

static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;
	int used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
	else
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

	if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->ml_priv = pvc;

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->destructor = free_netdev;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}



static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}



static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);

MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
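
/*
 * Usage note (illustrative only, not part of this module): the PVC
 * management branch of fr_ioctl() above is normally reached from user
 * space via the SIOCWANDEV ioctl, e.g. by the sethdlc(8) utility.
 * A minimal sketch, assuming the uapi definitions from <linux/if.h>
 * (struct ifreq, if_settings, fr_proto_pvc) and <linux/sockios.h>
 * (SIOCWANDEV); the helper name add_fr_pvc() is hypothetical:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if.h>
 *	#include <linux/sockios.h>
 *
 *	static int add_fr_pvc(const char *frad, unsigned int dlci)
 *	{
 *		struct ifreq ifr;
 *		fr_proto_pvc pvc = { .dlci = dlci };
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, frad, IFNAMSIZ - 1);
 *		ifr.ifr_settings.type = IF_PROTO_FR_ADD_PVC;
 *		ifr.ifr_settings.ifs_ifsu.fr_pvc = &pvc;
 *		return ioctl(fd, SIOCWANDEV, &ifr);
 *	}
 *
 * On success a pvc%d (or pvceth%d) device is registered by fr_add_pvc()
 * and can then be brought up like any other network interface.
 */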