/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int pidx, vidx;
	unsigned int size;

	ASSERT_RTNL();

	pidx  = vlan_proto_idx(vlan_proto);
	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL);
	if (array == NULL)
		return -ENOBUFS;

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}

int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
		return -EEXIST;

	return 0;
}

int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}

/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like: eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}

static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);

	netdev_update_features(vlandev);
}

static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
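	/* Descriptive note (added): this notifier sees every net_device event
	 * in the system.  When the device is itself a VLAN device, only the
	 * /proc entries are updated (see __vlan_device_event() above).  When
	 * the device is a real device carrying VLANs, the relevant events
	 * (link state, address, MTU and feature changes, up/down transitions,
	 * unregister) are propagated to each VLAN device in its vlan_info
	 * group below.
	 */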
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate real device features to the vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid the underlying device from changing its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver
 *	arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}

static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vlan_hdr *vhdr;
	unsigned int hlen, off_vlan;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	for (p = *head; p; p = p->next) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};

static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

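	/* Descriptive note (added): create the per-namespace /proc/net/vlan
	 * entries (e.g. the "config" file maintained by vlanproc.c).
	 */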
	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
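
/* Usage note (added, illustration only; interface names and VLAN IDs are
 * arbitrary):
 *
 *   # via the rtnetlink interface registered by vlan_netlink_init():
 *   ip link add link eth0 name eth0.100 type vlan id 100
 *
 *   # via the legacy ioctl interface (vlan_ioctl_handler() above):
 *   vconfig add eth0 100
 */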