// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *		Please send support related email to: netdev@vger.kernel.org
 *		VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:
 *		Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <net/p8022.h>
#include <net/arp.h>
#include <linux/rtnetlink.h>
#include <linux/notifier.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/uaccess.h>

#include <linux/if_vlan.h>
#include "vlan.h"
#include "vlanproc.h"

#define DRV_VERSION "1.8"

/* Global VLAN variables */

unsigned int vlan_net_id __read_mostly;

const char vlan_fullname[] = "802.1Q VLAN Support";
const char vlan_version[] = DRV_VERSION;

/* End of global variables definitions. */

static int vlan_group_prealloc_vid(struct vlan_group *vg,
				   __be16 vlan_proto, u16 vlan_id)
{
	struct net_device **array;
	unsigned int vidx;
	unsigned int size;
	int pidx;

	ASSERT_RTNL();

	pidx = vlan_proto_idx(vlan_proto);
	if (pidx < 0)
		return -EINVAL;

	vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
	array = vg->vlan_devices_arrays[pidx][vidx];
	if (array != NULL)
		return 0;

	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
	array = kzalloc(size, GFP_KERNEL_ACCOUNT);
	if (array == NULL)
		return -ENOBUFS;

	/* paired with smp_rmb() in __vlan_group_get_device() */
	smp_wmb();

	vg->vlan_devices_arrays[pidx][vidx] = array;
	return 0;
}

/* Propagate the real device's operational state to the VLAN device, unless
 * bridge binding is in use, in which case the bridge drives the VLAN's
 * operstate instead.
 */
static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
					    struct net_device *dev,
					    struct vlan_dev_priv *vlan)
{
	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
		netif_stacked_transfer_operstate(rootdev, dev);
}

void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we don't need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}

int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id,
			struct netlink_ext_ack *extack)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
		return -EEXIST;
	}

	return 0;
}

int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	vlan_stacked_transfer_operstate(real_dev, dev, vlan);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now let's place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}

/* Attach a VLAN device to a MAC address (i.e. an Ethernet card).
 * Returns 0 if the device was created or a negative error code otherwise.
 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
				  NULL);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like: eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like: eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like: vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	free_netdev(new_dev);
	return err;
}

static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}

static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
	vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);

	netdev_update_features(vlandev);
}

static int __vlan_device_event(struct net_device *dev, unsigned long event)
{
	int err = 0;

	switch (event) {
	case NETDEV_CHANGENAME:
		vlan_proc_rem_dev(dev);
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_REGISTER:
		err = vlan_proc_add_dev(dev);
		break;
	case NETDEV_UNREGISTER:
		vlan_proc_rem_dev(dev);
		break;
	}

	return err;
}

static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);
	int err;

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP,
						 extack);
			vlan_stacked_transfer_operstate(dev, vlandev, vlan);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;

	case NETDEV_CVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_CVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
		break;

	case NETDEV_SVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_SVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
		break;
	}

out:
	return NOTIFY_DONE;
}

static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};

/*
 *	VLAN IOCTL handler.
 *	o execute requested action or pass command to the device driver
 *	  arg is really a struct vlan_ioctl_args __user *.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2,
					  sizeof(args.u.device2));
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}

static int __net_init vlan_init_net(struct net *net)
{
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	int err;

	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;

	err = vlan_proc_init(net);

	return err;
}

static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}

static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};

static int __init vlan_proto_init(void)
{
	int err;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

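	/* Error unwinding mirrors the registration order above; each label
	 * undoes only the steps that had already succeeded before the
	 * failure.
	 */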
err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}

static void __exit vlan_cleanup_module(void)
{
	vlan_ioctl_set(NULL);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}

module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
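
/* Usage note: a minimal user-space sketch of how the legacy SIOCSIFVLAN
 * interface served by vlan_ioctl_handler() above is typically driven.
 * The add_vlan() helper name is illustrative only; struct vlan_ioctl_args,
 * ADD_VLAN_CMD and SIOCSIFVLAN come from <linux/if_vlan.h> and
 * <linux/sockios.h>. Modern setups use the rtnetlink path (vlan_link_ops)
 * instead.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_vlan.h>
 *	#include <linux/sockios.h>
 *
 *	int add_vlan(const char *ifname, int vid)
 *	{
 *		struct vlan_ioctl_args args;
 *		int fd, err;
 *
 *		fd = socket(AF_INET, SOCK_STREAM, 0);
 *		if (fd < 0)
 *			return fd;
 *
 *		memset(&args, 0, sizeof(args));
 *		args.cmd = ADD_VLAN_CMD;
 *		strncpy(args.device1, ifname, sizeof(args.device1) - 1);
 *		args.u.VID = vid;
 *
 *		err = ioctl(fd, SIOCSIFVLAN, &args);
 *		close(fd);
 *		return err;
 *	}
 */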