// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3: IP/IP protocol decoder modified to support
 *	virtual tunnel interface
 *
 *	Authors:
 *		Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
 */

/*
   This version of net/ipv4/ip_vti.c is cloned from net/ipv4/ipip.c

   For comments look at net/ipv4/ip_gre.c --ANK
 */

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
#include <linux/icmpv6.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/ip_tunnels.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static struct rtnl_link_ops vti_link_ops __read_mostly;

static unsigned int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);

static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
                     int encap_type, bool update_skb_dev)
{
        struct ip_tunnel *tunnel;
        const struct iphdr *iph = ip_hdr(skb);
        struct net *net = dev_net(skb->dev);
        struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->saddr, iph->daddr, 0);
        if (tunnel) {
                if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto drop;

                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;

                if (update_skb_dev)
                        skb->dev = tunnel->dev;

                return xfrm_input(skb, nexthdr, spi, encap_type);
        }

        return -EINVAL;
drop:
        kfree_skb(skb);
        return 0;
}

static int vti_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
                           int encap_type)
{
        return vti_input(skb, nexthdr, spi, encap_type, false);
}

static int vti_rcv(struct sk_buff *skb, __be32 spi, bool update_skb_dev)
{
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);

        return vti_input(skb, ip_hdr(skb)->protocol, spi, 0, update_skb_dev);
}

static int vti_rcv_proto(struct sk_buff *skb)
{
        return vti_rcv(skb, 0, false);
}

static int vti_rcv_tunnel(struct sk_buff *skb)
{
        return vti_rcv(skb, ip_hdr(skb)->saddr, true);
}

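/* Post-decapsulation callback, run by the xfrm input path for packets
 * that vti_input() associated with a tunnel: re-check policy using the
 * tunnel's input key as the skb mark, scrub the packet when it crosses
 * network namespaces, and account it in the device rx statistics.
 */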
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
        unsigned short family;
        struct net_device *dev;
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        const struct xfrm_mode *inner_mode;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
        u32 orig_mark = skb->mark;
        int ret;

        if (!tunnel)
                return 1;

        dev = tunnel->dev;

        if (err) {
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;

                return 0;
        }

        x = xfrm_input_state(skb);

        inner_mode = &x->inner_mode;

        if (x->sel.family == AF_UNSPEC) {
                inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
                if (inner_mode == NULL) {
                        XFRM_INC_STATS(dev_net(skb->dev),
                                       LINUX_MIB_XFRMINSTATEMODEERROR);
                        return -EINVAL;
                }
        }

        family = inner_mode->family;

        skb->mark = be32_to_cpu(tunnel->parms.i_key);
        ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
        skb->mark = orig_mark;

        if (!ret)
                return -EPERM;

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
        skb->dev = dev;

        tstats = this_cpu_ptr(dev->tstats);

        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        return 0;
}

static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
{
        xfrm_address_t *daddr = (xfrm_address_t *)&dst;
        xfrm_address_t *saddr = (xfrm_address_t *)&src;

        /* if there is no transform then this tunnel is not functional.
         * Or if the xfrm is not mode tunnel.
         */
        if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
            x->props.family != AF_INET)
                return false;

        if (!dst)
                return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);

        if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
                return false;

        return true;
}

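/* Transmit path proper: when the skb has no cached route, one is looked
 * up for the configured flow, then xfrm_lookup() resolves the IPsec
 * bundle. The bundle is only used if vti_state_check() confirms a
 * matching IPv4 tunnel-mode state; otherwise the packet is dropped and
 * counted as a carrier error. Oversized packets trigger PMTU signalling
 * (ICMP_FRAG_NEEDED or ICMPV6_PKT_TOOBIG) before dst_output() is called.
 */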
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                            struct flowi *fl)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_parm *parms = &tunnel->parms;
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
        int pkt_len = skb->len;
        int err;
        int mtu;

        if (!dst) {
                struct rtable *rt;

                fl->u.ip4.flowi4_oif = dev->ifindex;
                fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
                rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
                if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error_icmp;
                }
                dst = &rt->dst;
                skb_dst_set(skb, dst);
        }

        dst_hold(dst);
        dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
        if (IS_ERR(dst)) {
                dev->stats.tx_carrier_errors++;
                goto tx_error_icmp;
        }

        if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
                dev->stats.tx_carrier_errors++;
                dst_release(dst);
                goto tx_error_icmp;
        }

        tdev = dst->dev;

        if (tdev == dev) {
                dst_release(dst);
                dev->stats.collisions++;
                goto tx_error;
        }

        mtu = dst_mtu(dst);
        if (skb->len > mtu) {
                skb_dst_update_pmtu_no_confirm(skb, mtu);
                if (skb->protocol == htons(ETH_P_IP)) {
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
                } else {
                        if (mtu < IPV6_MIN_MTU)
                                mtu = IPV6_MIN_MTU;

                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                }

                dst_release(dst);
                goto tx_error;
        }

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;

        err = dst_output(tunnel->net, skb->sk, skb);
        if (net_xmit_eval(err) == 0)
                err = pkt_len;
        iptunnel_xmit_stats(dev, err);
        return NETDEV_TX_OK;

tx_error_icmp:
        dst_link_failure(skb);
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

/* This function assumes it is being called from dev_queue_xmit()
 * and that skb is filled properly by that function.
 */
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct flowi fl;

        if (!pskb_inet_may_pull(skb))
                goto tx_err;

        memset(&fl, 0, sizeof(fl));

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                break;
        case htons(ETH_P_IPV6):
                xfrm_decode_session(skb, &fl, AF_INET6);
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
                break;
        default:
                goto tx_err;
        }

        /* override mark with tunnel output key */
        fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);

        return vti_xmit(skb, dev, &fl);

tx_err:
        dev->stats.tx_errors++;
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int vti4_err(struct sk_buff *skb, u32 info)
{
        __be32 spi;
        __u32 mark;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel;
        struct ip_esp_hdr *esph;
        struct ip_auth_hdr *ah;
        struct ip_comp_hdr *ipch;
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        int protocol = iph->protocol;
        struct ip_tunnel_net *itn = net_generic(net, vti_net_id);

        tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
                                  iph->daddr, iph->saddr, 0);
        if (!tunnel)
                return -1;

        mark = be32_to_cpu(tunnel->parms.o_key);

        switch (protocol) {
        case IPPROTO_ESP:
                esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
                spi = esph->spi;
                break;
        case IPPROTO_AH:
                ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
                spi = ah->spi;
                break;
        case IPPROTO_COMP:
                ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
                spi = htonl(ntohs(ipch->cpi));
                break;
        default:
                return 0;
        }

        switch (icmp_hdr(skb)->type) {
        case ICMP_DEST_UNREACH:
                if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
                        return 0;
        case ICMP_REDIRECT:
                break;
        default:
                return 0;
        }

        x = xfrm_state_lookup(net, mark, (const xfrm_address_t *)&iph->daddr,
                              spi, protocol, AF_INET);
        if (!x)
                return 0;

        if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
                ipv4_update_pmtu(skb, net, info, 0, protocol);
        else
                ipv4_redirect(skb, net, 0, protocol);
        xfrm_state_put(x);

        return 0;
}

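/* Legacy ioctl configuration path (SIOCADDTUNNEL/SIOCCHGTUNNEL/
 * SIOCDELTUNNEL). struct ip_tunnel_parm has no VTI-specific key flags,
 * so GRE_KEY is reused to signal whether i_key/o_key are valid, and
 * VTI_ISVTI is forced in i_flags before the request is handed to the
 * generic ip_tunnel_ioctl() helper.
 */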
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip_tunnel_parm p;

        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
                return -EFAULT;

        if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
                if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
                    p.iph.ihl != 5)
                        return -EINVAL;
        }

        if (!(p.i_flags & GRE_KEY))
                p.i_key = 0;
        if (!(p.o_flags & GRE_KEY))
                p.o_key = 0;

        p.i_flags = VTI_ISVTI;

        err = ip_tunnel_ioctl(dev, &p, cmd);
        if (err)
                return err;

        if (cmd != SIOCDELTUNNEL) {
                p.i_flags |= GRE_KEY;
                p.o_flags |= GRE_KEY;
        }

        if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
                return -EFAULT;
        return 0;
}

static const struct net_device_ops vti_netdev_ops = {
        .ndo_init = vti_tunnel_init,
        .ndo_uninit = ip_tunnel_uninit,
        .ndo_start_xmit = vti_tunnel_xmit,
        .ndo_do_ioctl = vti_tunnel_ioctl,
        .ndo_change_mtu = ip_tunnel_change_mtu,
        .ndo_get_stats64 = ip_tunnel_get_stats64,
        .ndo_get_iflink = ip_tunnel_get_iflink,
};

static void vti_tunnel_setup(struct net_device *dev)
{
        dev->netdev_ops = &vti_netdev_ops;
        dev->type = ARPHRD_TUNNEL;
        ip_tunnel_setup(dev, vti_net_id);
}

static int vti_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        memcpy(dev->dev_addr, &iph->saddr, 4);
        memcpy(dev->broadcast, &iph->daddr, 4);

        dev->flags = IFF_NOARP;
        dev->addr_len = 4;
        dev->features |= NETIF_F_LLTX;
        netif_keep_dst(dev);

        return ip_tunnel_init(dev);
}

static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;

        iph->version = 4;
        iph->protocol = IPPROTO_IPIP;
        iph->ihl = 5;
}

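/* The same receive handlers are registered for ESP, AH and IPComp at
 * priority 100, so decapsulated traffic reaches a vti device whichever
 * IPsec protocol carries it. The xfrm_tunnel handler below additionally
 * catches IPIP-encapsulated packets at priority 0.
 */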
static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
        .handler = vti_rcv_proto,
        .input_handler = vti_input_proto,
        .cb_handler = vti_rcv_cb,
        .err_handler = vti4_err,
        .priority = 100,
};

static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
        .handler = vti_rcv_proto,
        .input_handler = vti_input_proto,
        .cb_handler = vti_rcv_cb,
        .err_handler = vti4_err,
        .priority = 100,
};

static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
        .handler = vti_rcv_proto,
        .input_handler = vti_input_proto,
        .cb_handler = vti_rcv_cb,
        .err_handler = vti4_err,
        .priority = 100,
};

static struct xfrm_tunnel ipip_handler __read_mostly = {
        .handler = vti_rcv_tunnel,
        .err_handler = vti4_err,
        .priority = 0,
};

static int __net_init vti_init_net(struct net *net)
{
        int err;
        struct ip_tunnel_net *itn;

        err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
        if (err)
                return err;
        itn = net_generic(net, vti_net_id);
        if (itn->fb_tunnel_dev)
                vti_fb_tunnel_init(itn->fb_tunnel_dev);
        return 0;
}

static void __net_exit vti_exit_batch_net(struct list_head *list_net)
{
        ip_tunnel_delete_nets(list_net, vti_net_id, &vti_link_ops);
}

static struct pernet_operations vti_net_ops = {
        .init = vti_init_net,
        .exit_batch = vti_exit_batch_net,
        .id = &vti_net_id,
        .size = sizeof(struct ip_tunnel_net),
};

static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
                               struct netlink_ext_ack *extack)
{
        return 0;
}

static void vti_netlink_parms(struct nlattr *data[],
                              struct ip_tunnel_parm *parms,
                              __u32 *fwmark)
{
        memset(parms, 0, sizeof(*parms));

        parms->iph.protocol = IPPROTO_IPIP;

        if (!data)
                return;

        parms->i_flags = VTI_ISVTI;

        if (data[IFLA_VTI_LINK])
                parms->link = nla_get_u32(data[IFLA_VTI_LINK]);

        if (data[IFLA_VTI_IKEY])
                parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);

        if (data[IFLA_VTI_OKEY])
                parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);

        if (data[IFLA_VTI_LOCAL])
                parms->iph.saddr = nla_get_in_addr(data[IFLA_VTI_LOCAL]);

        if (data[IFLA_VTI_REMOTE])
                parms->iph.daddr = nla_get_in_addr(data[IFLA_VTI_REMOTE]);

        if (data[IFLA_VTI_FWMARK])
                *fwmark = nla_get_u32(data[IFLA_VTI_FWMARK]);
}

static int vti_newlink(struct net *src_net, struct net_device *dev,
                       struct nlattr *tb[], struct nlattr *data[],
                       struct netlink_ext_ack *extack)
{
        struct ip_tunnel_parm parms;
        __u32 fwmark = 0;

        vti_netlink_parms(data, &parms, &fwmark);
        return ip_tunnel_newlink(dev, tb, &parms, fwmark);
}

static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
                          struct nlattr *data[],
                          struct netlink_ext_ack *extack)
{
        struct ip_tunnel *t = netdev_priv(dev);
        __u32 fwmark = t->fwmark;
        struct ip_tunnel_parm p;

        vti_netlink_parms(data, &p, &fwmark);
        return ip_tunnel_changelink(dev, tb, &p, fwmark);
}

static size_t vti_get_size(const struct net_device *dev)
{
        return
                /* IFLA_VTI_LINK */
                nla_total_size(4) +
                /* IFLA_VTI_IKEY */
                nla_total_size(4) +
                /* IFLA_VTI_OKEY */
                nla_total_size(4) +
                /* IFLA_VTI_LOCAL */
                nla_total_size(4) +
                /* IFLA_VTI_REMOTE */
                nla_total_size(4) +
                /* IFLA_VTI_FWMARK */
                nla_total_size(4) +
                0;
}

static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip_tunnel *t = netdev_priv(dev);
        struct ip_tunnel_parm *p = &t->parms;

        if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) ||
            nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key) ||
            nla_put_in_addr(skb, IFLA_VTI_LOCAL, p->iph.saddr) ||
            nla_put_in_addr(skb, IFLA_VTI_REMOTE, p->iph.daddr) ||
            nla_put_u32(skb, IFLA_VTI_FWMARK, t->fwmark))
                return -EMSGSIZE;

        return 0;
}

static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
        [IFLA_VTI_LINK] = { .type = NLA_U32 },
        [IFLA_VTI_IKEY] = { .type = NLA_U32 },
        [IFLA_VTI_OKEY] = { .type = NLA_U32 },
        [IFLA_VTI_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) },
        [IFLA_VTI_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) },
        [IFLA_VTI_FWMARK] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vti_link_ops __read_mostly = {
        .kind = "vti",
        .maxtype = IFLA_VTI_MAX,
        .policy = vti_policy,
        .priv_size = sizeof(struct ip_tunnel),
        .setup = vti_tunnel_setup,
        .validate = vti_tunnel_validate,
        .newlink = vti_newlink,
        .changelink = vti_changelink,
        .dellink = ip_tunnel_dellink,
        .get_size = vti_get_size,
        .fill_info = vti_fill_info,
        .get_link_net = ip_tunnel_get_link_net,
};

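/* Module init: register the per-netns state, then the xfrm handlers for
 * ESP, AH and IPComp, the IPIP tunnel handler, and finally the rtnl link
 * ops. Each step is unwound in reverse order if a later registration
 * fails.
 */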
static int __init vti_init(void)
{
        const char *msg;
        int err;

        pr_info("IPv4 over IPsec tunneling driver\n");

        msg = "tunnel device";
        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
                goto pernet_dev_failed;

        msg = "tunnel protocols";
        err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
        if (err < 0)
                goto xfrm_proto_esp_failed;
        err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
        if (err < 0)
                goto xfrm_proto_ah_failed;
        err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
        if (err < 0)
                goto xfrm_proto_comp_failed;

        msg = "ipip tunnel";
        err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
        if (err < 0)
                goto xfrm_tunnel_failed;

        msg = "netlink interface";
        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        return err;

rtnl_link_failed:
        xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_tunnel_failed:
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
xfrm_proto_comp_failed:
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
        unregister_pernet_device(&vti_net_ops);
pernet_dev_failed:
        pr_err("vti init: failed to register %s\n", msg);
        return err;
}

static void __exit vti_fini(void)
{
        rtnl_link_unregister(&vti_link_ops);
        xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti_net_ops);
}

module_init(vti_init);
module_exit(vti_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("vti");
MODULE_ALIAS_NETDEV("ip_vti0");