1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * vrf.c: device driver to encapsulate a VRF space 4 * 5 * Copyright (c) 2015 Cumulus Networks. All rights reserved. 6 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com> 7 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com> 8 * 9 * Based on dummy, team and ipvlan drivers 10 */ 11 12 #include <linux/module.h> 13 #include <linux/kernel.h> 14 #include <linux/netdevice.h> 15 #include <linux/etherdevice.h> 16 #include <linux/ip.h> 17 #include <linux/init.h> 18 #include <linux/moduleparam.h> 19 #include <linux/netfilter.h> 20 #include <linux/rtnetlink.h> 21 #include <net/rtnetlink.h> 22 #include <linux/u64_stats_sync.h> 23 #include <linux/hashtable.h> 24 #include <linux/spinlock_types.h> 25 26 #include <linux/inetdevice.h> 27 #include <net/arp.h> 28 #include <net/ip.h> 29 #include <net/ip_fib.h> 30 #include <net/ip6_fib.h> 31 #include <net/ip6_route.h> 32 #include <net/route.h> 33 #include <net/addrconf.h> 34 #include <net/l3mdev.h> 35 #include <net/fib_rules.h> 36 #include <net/netns/generic.h> 37 38 #define DRV_NAME "vrf" 39 #define DRV_VERSION "1.1" 40 41 #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ 42 43 #define HT_MAP_BITS 4 44 #define HASH_INITVAL ((u32)0xcafef00d) 45 46 struct vrf_map { 47 DECLARE_HASHTABLE(ht, HT_MAP_BITS); 48 spinlock_t vmap_lock; 49 50 /* shared_tables: 51 * count how many distinct tables do not comply with the strict mode 52 * requirement. 53 * shared_tables value must be 0 in order to enable the strict mode. 54 * 55 * example of the evolution of shared_tables: 56 * | time 57 * add vrf0 --> table 100 shared_tables = 0 | t0 58 * add vrf1 --> table 101 shared_tables = 0 | t1 59 * add vrf2 --> table 100 shared_tables = 1 | t2 60 * add vrf3 --> table 100 shared_tables = 1 | t3 61 * add vrf4 --> table 101 shared_tables = 2 v t4 62 * 63 * shared_tables is a "step function" (or "staircase function") 64 * and it is increased by one when the second vrf is associated to a 65 * table. 66 * 67 * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1. 68 * 69 * at t3, another dev (vrf3) is bound to the same table 100 but the 70 * value of shared_tables is still 1. 71 * This means that no matter how many new vrfs will register on the 72 * table 100, the shared_tables will not increase (considering only 73 * table 100). 74 * 75 * at t4, vrf4 is bound to table 101, and shared_tables = 2. 76 * 77 * Looking at the value of shared_tables we can immediately know if 78 * the strict_mode can or cannot be enforced. Indeed, strict_mode 79 * can be enforced iff shared_tables = 0. 80 * 81 * Conversely, shared_tables is decreased when a vrf is de-associated 82 * from a table with exactly two associated vrfs. 83 */ 84 u32 shared_tables; 85 86 bool strict_mode; 87 }; 88 89 struct vrf_map_elem { 90 struct hlist_node hnode; 91 struct list_head vrf_list; /* VRFs registered to this table */ 92 93 u32 table_id; 94 int users; 95 int ifindex; 96 }; 97 98 static unsigned int vrf_net_id; 99 100 /* per netns vrf data */ 101 struct netns_vrf { 102 /* protected by rtnl lock */ 103 bool add_fib_rules; 104 105 struct vrf_map vmap; 106 struct ctl_table_header *ctl_hdr; 107 }; 108 109 struct net_vrf { 110 struct rtable __rcu *rth; 111 struct rt6_info __rcu *rt6; 112 #if IS_ENABLED(CONFIG_IPV6) 113 struct fib6_table *fib6_table; 114 #endif 115 u32 tb_id; 116 117 struct list_head me_list; /* entry in vrf_map_elem */ 118 int ifindex; 119 }; 120 121 struct pcpu_dstats { 122 u64 tx_pkts; 123 u64 tx_bytes; 124 u64 tx_drps; 125 u64 rx_pkts; 126 u64 rx_bytes; 127 u64 rx_drps; 128 struct u64_stats_sync syncp; 129 }; 130 131 static void vrf_rx_stats(struct net_device *dev, int len) 132 { 133 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 134 135 u64_stats_update_begin(&dstats->syncp); 136 dstats->rx_pkts++; 137 dstats->rx_bytes += len; 138 u64_stats_update_end(&dstats->syncp); 139 } 140 141 static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) 142 { 143 vrf_dev->stats.tx_errors++; 144 kfree_skb(skb); 145 } 146 147 static void vrf_get_stats64(struct net_device *dev, 148 struct rtnl_link_stats64 *stats) 149 { 150 int i; 151 152 for_each_possible_cpu(i) { 153 const struct pcpu_dstats *dstats; 154 u64 tbytes, tpkts, tdrops, rbytes, rpkts; 155 unsigned int start; 156 157 dstats = per_cpu_ptr(dev->dstats, i); 158 do { 159 start = u64_stats_fetch_begin_irq(&dstats->syncp); 160 tbytes = dstats->tx_bytes; 161 tpkts = dstats->tx_pkts; 162 tdrops = dstats->tx_drps; 163 rbytes = dstats->rx_bytes; 164 rpkts = dstats->rx_pkts; 165 } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); 166 stats->tx_bytes += tbytes; 167 stats->tx_packets += tpkts; 168 stats->tx_dropped += tdrops; 169 stats->rx_bytes += rbytes; 170 stats->rx_packets += rpkts; 171 } 172 } 173 174 static struct vrf_map *netns_vrf_map(struct net *net) 175 { 176 struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); 177 178 return &nn_vrf->vmap; 179 } 180 181 static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev) 182 { 183 return netns_vrf_map(dev_net(dev)); 184 } 185 186 static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me) 187 { 188 struct list_head *me_head = &me->vrf_list; 189 struct net_vrf *vrf; 190 191 if (list_empty(me_head)) 192 return -ENODEV; 193 194 vrf = list_first_entry(me_head, struct net_vrf, me_list); 195 196 return vrf->ifindex; 197 } 198 199 static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags) 200 { 201 struct vrf_map_elem *me; 202 203 me = kmalloc(sizeof(*me), flags); 204 if (!me) 205 return NULL; 206 207 return me; 208 } 209 210 static void vrf_map_elem_free(struct vrf_map_elem *me) 211 { 212 kfree(me); 213 } 214 215 static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id, 216 int ifindex, int users) 217 { 218 me->table_id = table_id; 219 me->ifindex = ifindex; 220 me->users = users; 221 INIT_LIST_HEAD(&me->vrf_list); 222 } 223 224 static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap, 225 u32 table_id) 226 { 227 struct vrf_map_elem *me; 228 u32 key; 229 230 key = jhash_1word(table_id, HASH_INITVAL); 231 hash_for_each_possible(vmap->ht, me, hnode, key) { 232 if (me->table_id == table_id) 233 return me; 234 } 235 236 return NULL; 237 } 238 239 static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me) 240 { 241 u32 table_id = me->table_id; 242 u32 key; 243 244 key = jhash_1word(table_id, HASH_INITVAL); 245 hash_add(vmap->ht, &me->hnode, key); 246 } 247 248 static void vrf_map_del_elem(struct vrf_map_elem *me) 249 { 250 hash_del(&me->hnode); 251 } 252 253 static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock) 254 { 255 spin_lock(&vmap->vmap_lock); 256 } 257 258 static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) 259 { 260 spin_unlock(&vmap->vmap_lock); 261 } 262 263 static bool vrf_strict_mode(struct vrf_map *vmap) 264 { 265 bool strict_mode; 266 267 vrf_map_lock(vmap); 268 strict_mode = vmap->strict_mode; 269 vrf_map_unlock(vmap); 270 271 return strict_mode; 272 } 273 274 static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode) 275 { 276 bool *cur_mode; 277 int res = 0; 278 279 vrf_map_lock(vmap); 280 281 cur_mode = &vmap->strict_mode; 282 if (*cur_mode == new_mode) 283 goto unlock; 284 285 if (*cur_mode) { 286 /* disable strict mode */ 287 *cur_mode = false; 288 } else { 289 if (vmap->shared_tables) { 290 /* we cannot allow strict_mode because there are some 291 * vrfs that share one or more tables. 292 */ 293 res = -EBUSY; 294 goto unlock; 295 } 296 297 /* no tables are shared among vrfs, so we can go back 298 * to 1:1 association between a vrf with its table. 299 */ 300 *cur_mode = true; 301 } 302 303 unlock: 304 vrf_map_unlock(vmap); 305 306 return res; 307 } 308 309 /* called with rtnl lock held */ 310 static int 311 vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack) 312 { 313 struct vrf_map *vmap = netns_vrf_map_by_dev(dev); 314 struct net_vrf *vrf = netdev_priv(dev); 315 struct vrf_map_elem *new_me, *me; 316 u32 table_id = vrf->tb_id; 317 bool free_new_me = false; 318 int users; 319 int res; 320 321 /* we pre-allocate elements used in the spin-locked section (so that we 322 * keep the spinlock as short as possibile). 323 */ 324 new_me = vrf_map_elem_alloc(GFP_KERNEL); 325 if (!new_me) 326 return -ENOMEM; 327 328 vrf_map_elem_init(new_me, table_id, dev->ifindex, 0); 329 330 vrf_map_lock(vmap); 331 332 me = vrf_map_lookup_elem(vmap, table_id); 333 if (!me) { 334 me = new_me; 335 vrf_map_add_elem(vmap, me); 336 goto link_vrf; 337 } 338 339 /* we already have an entry in the vrf_map, so it means there is (at 340 * least) a vrf registered on the specific table. 341 */ 342 free_new_me = true; 343 if (vmap->strict_mode) { 344 /* vrfs cannot share the same table */ 345 NL_SET_ERR_MSG(extack, "Table is used by another VRF"); 346 res = -EBUSY; 347 goto unlock; 348 } 349 350 link_vrf: 351 users = ++me->users; 352 if (users == 2) 353 ++vmap->shared_tables; 354 355 list_add(&vrf->me_list, &me->vrf_list); 356 357 res = 0; 358 359 unlock: 360 vrf_map_unlock(vmap); 361 362 /* clean-up, if needed */ 363 if (free_new_me) 364 vrf_map_elem_free(new_me); 365 366 return res; 367 } 368 369 /* called with rtnl lock held */ 370 static void vrf_map_unregister_dev(struct net_device *dev) 371 { 372 struct vrf_map *vmap = netns_vrf_map_by_dev(dev); 373 struct net_vrf *vrf = netdev_priv(dev); 374 u32 table_id = vrf->tb_id; 375 struct vrf_map_elem *me; 376 int users; 377 378 vrf_map_lock(vmap); 379 380 me = vrf_map_lookup_elem(vmap, table_id); 381 if (!me) 382 goto unlock; 383 384 list_del(&vrf->me_list); 385 386 users = --me->users; 387 if (users == 1) { 388 --vmap->shared_tables; 389 } else if (users == 0) { 390 vrf_map_del_elem(me); 391 392 /* no one will refer to this element anymore */ 393 vrf_map_elem_free(me); 394 } 395 396 unlock: 397 vrf_map_unlock(vmap); 398 } 399 400 /* return the vrf device index associated with the table_id */ 401 static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id) 402 { 403 struct vrf_map *vmap = netns_vrf_map(net); 404 struct vrf_map_elem *me; 405 int ifindex; 406 407 vrf_map_lock(vmap); 408 409 if (!vmap->strict_mode) { 410 ifindex = -EPERM; 411 goto unlock; 412 } 413 414 me = vrf_map_lookup_elem(vmap, table_id); 415 if (!me) { 416 ifindex = -ENODEV; 417 goto unlock; 418 } 419 420 ifindex = vrf_map_elem_get_vrf_ifindex(me); 421 422 unlock: 423 vrf_map_unlock(vmap); 424 425 return ifindex; 426 } 427 428 /* by default VRF devices do not have a qdisc and are expected 429 * to be created with only a single queue. 430 */ 431 static bool qdisc_tx_is_default(const struct net_device *dev) 432 { 433 struct netdev_queue *txq; 434 struct Qdisc *qdisc; 435 436 if (dev->num_tx_queues > 1) 437 return false; 438 439 txq = netdev_get_tx_queue(dev, 0); 440 qdisc = rcu_access_pointer(txq->qdisc); 441 442 return !qdisc->enqueue; 443 } 444 445 /* Local traffic destined to local address. Reinsert the packet to rx 446 * path, similar to loopback handling. 447 */ 448 static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, 449 struct dst_entry *dst) 450 { 451 int len = skb->len; 452 453 skb_orphan(skb); 454 455 skb_dst_set(skb, dst); 456 457 /* set pkt_type to avoid skb hitting packet taps twice - 458 * once on Tx and again in Rx processing 459 */ 460 skb->pkt_type = PACKET_LOOPBACK; 461 462 skb->protocol = eth_type_trans(skb, dev); 463 464 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) 465 vrf_rx_stats(dev, len); 466 else 467 this_cpu_inc(dev->dstats->rx_drps); 468 469 return NETDEV_TX_OK; 470 } 471 472 #if IS_ENABLED(CONFIG_IPV6) 473 static int vrf_ip6_local_out(struct net *net, struct sock *sk, 474 struct sk_buff *skb) 475 { 476 int err; 477 478 err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, 479 sk, skb, NULL, skb_dst(skb)->dev, dst_output); 480 481 if (likely(err == 1)) 482 err = dst_output(net, sk, skb); 483 484 return err; 485 } 486 487 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, 488 struct net_device *dev) 489 { 490 const struct ipv6hdr *iph; 491 struct net *net = dev_net(skb->dev); 492 struct flowi6 fl6; 493 int ret = NET_XMIT_DROP; 494 struct dst_entry *dst; 495 struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst; 496 497 if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) 498 goto err; 499 500 iph = ipv6_hdr(skb); 501 502 memset(&fl6, 0, sizeof(fl6)); 503 /* needed to match OIF rule */ 504 fl6.flowi6_oif = dev->ifindex; 505 fl6.flowi6_iif = LOOPBACK_IFINDEX; 506 fl6.daddr = iph->daddr; 507 fl6.saddr = iph->saddr; 508 fl6.flowlabel = ip6_flowinfo(iph); 509 fl6.flowi6_mark = skb->mark; 510 fl6.flowi6_proto = iph->nexthdr; 511 fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; 512 513 dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL); 514 if (IS_ERR(dst) || dst == dst_null) 515 goto err; 516 517 skb_dst_drop(skb); 518 519 /* if dst.dev is loopback or the VRF device again this is locally 520 * originated traffic destined to a local address. Short circuit 521 * to Rx path 522 */ 523 if (dst->dev == dev) 524 return vrf_local_xmit(skb, dev, dst); 525 526 skb_dst_set(skb, dst); 527 528 /* strip the ethernet header added for pass through VRF device */ 529 __skb_pull(skb, skb_network_offset(skb)); 530 531 ret = vrf_ip6_local_out(net, skb->sk, skb); 532 if (unlikely(net_xmit_eval(ret))) 533 dev->stats.tx_errors++; 534 else 535 ret = NET_XMIT_SUCCESS; 536 537 return ret; 538 err: 539 vrf_tx_error(dev, skb); 540 return NET_XMIT_DROP; 541 } 542 #else 543 static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, 544 struct net_device *dev) 545 { 546 vrf_tx_error(dev, skb); 547 return NET_XMIT_DROP; 548 } 549 #endif 550 551 /* based on ip_local_out; can't use it b/c the dst is switched pointing to us */ 552 static int vrf_ip_local_out(struct net *net, struct sock *sk, 553 struct sk_buff *skb) 554 { 555 int err; 556 557 err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, 558 skb, NULL, skb_dst(skb)->dev, dst_output); 559 if (likely(err == 1)) 560 err = dst_output(net, sk, skb); 561 562 return err; 563 } 564 565 static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, 566 struct net_device *vrf_dev) 567 { 568 struct iphdr *ip4h; 569 int ret = NET_XMIT_DROP; 570 struct flowi4 fl4; 571 struct net *net = dev_net(vrf_dev); 572 struct rtable *rt; 573 574 if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) 575 goto err; 576 577 ip4h = ip_hdr(skb); 578 579 memset(&fl4, 0, sizeof(fl4)); 580 /* needed to match OIF rule */ 581 fl4.flowi4_oif = vrf_dev->ifindex; 582 fl4.flowi4_iif = LOOPBACK_IFINDEX; 583 fl4.flowi4_tos = RT_TOS(ip4h->tos); 584 fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF; 585 fl4.flowi4_proto = ip4h->protocol; 586 fl4.daddr = ip4h->daddr; 587 fl4.saddr = ip4h->saddr; 588 589 rt = ip_route_output_flow(net, &fl4, NULL); 590 if (IS_ERR(rt)) 591 goto err; 592 593 skb_dst_drop(skb); 594 595 /* if dst.dev is loopback or the VRF device again this is locally 596 * originated traffic destined to a local address. Short circuit 597 * to Rx path 598 */ 599 if (rt->dst.dev == vrf_dev) 600 return vrf_local_xmit(skb, vrf_dev, &rt->dst); 601 602 skb_dst_set(skb, &rt->dst); 603 604 /* strip the ethernet header added for pass through VRF device */ 605 __skb_pull(skb, skb_network_offset(skb)); 606 607 if (!ip4h->saddr) { 608 ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, 609 RT_SCOPE_LINK); 610 } 611 612 ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb); 613 if (unlikely(net_xmit_eval(ret))) 614 vrf_dev->stats.tx_errors++; 615 else 616 ret = NET_XMIT_SUCCESS; 617 618 out: 619 return ret; 620 err: 621 vrf_tx_error(vrf_dev, skb); 622 goto out; 623 } 624 625 static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) 626 { 627 switch (skb->protocol) { 628 case htons(ETH_P_IP): 629 return vrf_process_v4_outbound(skb, dev); 630 case htons(ETH_P_IPV6): 631 return vrf_process_v6_outbound(skb, dev); 632 default: 633 vrf_tx_error(dev, skb); 634 return NET_XMIT_DROP; 635 } 636 } 637 638 static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) 639 { 640 int len = skb->len; 641 netdev_tx_t ret = is_ip_tx_frame(skb, dev); 642 643 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 644 struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); 645 646 u64_stats_update_begin(&dstats->syncp); 647 dstats->tx_pkts++; 648 dstats->tx_bytes += len; 649 u64_stats_update_end(&dstats->syncp); 650 } else { 651 this_cpu_inc(dev->dstats->tx_drps); 652 } 653 654 return ret; 655 } 656 657 static int vrf_finish_direct(struct net *net, struct sock *sk, 658 struct sk_buff *skb) 659 { 660 struct net_device *vrf_dev = skb->dev; 661 662 if (!list_empty(&vrf_dev->ptype_all) && 663 likely(skb_headroom(skb) >= ETH_HLEN)) { 664 struct ethhdr *eth = skb_push(skb, ETH_HLEN); 665 666 ether_addr_copy(eth->h_source, vrf_dev->dev_addr); 667 eth_zero_addr(eth->h_dest); 668 eth->h_proto = skb->protocol; 669 670 rcu_read_lock_bh(); 671 dev_queue_xmit_nit(skb, vrf_dev); 672 rcu_read_unlock_bh(); 673 674 skb_pull(skb, ETH_HLEN); 675 } 676 677 return 1; 678 } 679 680 #if IS_ENABLED(CONFIG_IPV6) 681 /* modelled after ip6_finish_output2 */ 682 static int vrf_finish_output6(struct net *net, struct sock *sk, 683 struct sk_buff *skb) 684 { 685 struct dst_entry *dst = skb_dst(skb); 686 struct net_device *dev = dst->dev; 687 const struct in6_addr *nexthop; 688 struct neighbour *neigh; 689 int ret; 690 691 nf_reset_ct(skb); 692 693 skb->protocol = htons(ETH_P_IPV6); 694 skb->dev = dev; 695 696 rcu_read_lock_bh(); 697 nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); 698 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); 699 if (unlikely(!neigh)) 700 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); 701 if (!IS_ERR(neigh)) { 702 sock_confirm_neigh(skb, neigh); 703 ret = neigh_output(neigh, skb, false); 704 rcu_read_unlock_bh(); 705 return ret; 706 } 707 rcu_read_unlock_bh(); 708 709 IP6_INC_STATS(dev_net(dst->dev), 710 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); 711 kfree_skb(skb); 712 return -EINVAL; 713 } 714 715 /* modelled after ip6_output */ 716 static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) 717 { 718 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, 719 net, sk, skb, NULL, skb_dst(skb)->dev, 720 vrf_finish_output6, 721 !(IP6CB(skb)->flags & IP6SKB_REROUTED)); 722 } 723 724 /* set dst on skb to send packet to us via dev_xmit path. Allows 725 * packet to go through device based features such as qdisc, netfilter 726 * hooks and packet sockets with skb->dev set to vrf device. 727 */ 728 static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev, 729 struct sk_buff *skb) 730 { 731 struct net_vrf *vrf = netdev_priv(vrf_dev); 732 struct dst_entry *dst = NULL; 733 struct rt6_info *rt6; 734 735 rcu_read_lock(); 736 737 rt6 = rcu_dereference(vrf->rt6); 738 if (likely(rt6)) { 739 dst = &rt6->dst; 740 dst_hold(dst); 741 } 742 743 rcu_read_unlock(); 744 745 if (unlikely(!dst)) { 746 vrf_tx_error(vrf_dev, skb); 747 return NULL; 748 } 749 750 skb_dst_drop(skb); 751 skb_dst_set(skb, dst); 752 753 return skb; 754 } 755 756 static int vrf_output6_direct(struct net *net, struct sock *sk, 757 struct sk_buff *skb) 758 { 759 skb->protocol = htons(ETH_P_IPV6); 760 761 return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, 762 net, sk, skb, NULL, skb->dev, 763 vrf_finish_direct, 764 !(IPCB(skb)->flags & IPSKB_REROUTED)); 765 } 766 767 static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev, 768 struct sock *sk, 769 struct sk_buff *skb) 770 { 771 struct net *net = dev_net(vrf_dev); 772 int err; 773 774 skb->dev = vrf_dev; 775 776 err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, 777 skb, NULL, vrf_dev, vrf_output6_direct); 778 779 if (likely(err == 1)) 780 err = vrf_output6_direct(net, sk, skb); 781 782 /* reset skb device */ 783 if (likely(err == 1)) 784 nf_reset_ct(skb); 785 else 786 skb = NULL; 787 788 return skb; 789 } 790 791 static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, 792 struct sock *sk, 793 struct sk_buff *skb) 794 { 795 /* don't divert link scope packets */ 796 if (rt6_need_strict(&ipv6_hdr(skb)->daddr)) 797 return skb; 798 799 if (qdisc_tx_is_default(vrf_dev) || 800 IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) 801 return vrf_ip6_out_direct(vrf_dev, sk, skb); 802 803 return vrf_ip6_out_redirect(vrf_dev, skb); 804 } 805 806 /* holding rtnl */ 807 static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) 808 { 809 struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); 810 struct net *net = dev_net(dev); 811 struct dst_entry *dst; 812 813 RCU_INIT_POINTER(vrf->rt6, NULL); 814 synchronize_rcu(); 815 816 /* move dev in dst's to loopback so this VRF device can be deleted 817 * - based on dst_ifdown 818 */ 819 if (rt6) { 820 dst = &rt6->dst; 821 dev_put(dst->dev); 822 dst->dev = net->loopback_dev; 823 dev_hold(dst->dev); 824 dst_release(dst); 825 } 826 } 827 828 static int vrf_rt6_create(struct net_device *dev) 829 { 830 int flags = DST_NOPOLICY | DST_NOXFRM; 831 struct net_vrf *vrf = netdev_priv(dev); 832 struct net *net = dev_net(dev); 833 struct rt6_info *rt6; 834 int rc = -ENOMEM; 835 836 /* IPv6 can be CONFIG enabled and then disabled runtime */ 837 if (!ipv6_mod_enabled()) 838 return 0; 839 840 vrf->fib6_table = fib6_new_table(net, vrf->tb_id); 841 if (!vrf->fib6_table) 842 goto out; 843 844 /* create a dst for routing packets out a VRF device */ 845 rt6 = ip6_dst_alloc(net, dev, flags); 846 if (!rt6) 847 goto out; 848 849 rt6->dst.output = vrf_output6; 850 851 rcu_assign_pointer(vrf->rt6, rt6); 852 853 rc = 0; 854 out: 855 return rc; 856 } 857 #else 858 static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, 859 struct sock *sk, 860 struct sk_buff *skb) 861 { 862 return skb; 863 } 864 865 static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) 866 { 867 } 868 869 static int vrf_rt6_create(struct net_device *dev) 870 { 871 return 0; 872 } 873 #endif 874 875 /* modelled after ip_finish_output2 */ 876 static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) 877 { 878 struct dst_entry *dst = skb_dst(skb); 879 struct rtable *rt = (struct rtable *)dst; 880 struct net_device *dev = dst->dev; 881 unsigned int hh_len = LL_RESERVED_SPACE(dev); 882 struct neighbour *neigh; 883 bool is_v6gw = false; 884 int ret = -EINVAL; 885 886 nf_reset_ct(skb); 887 888 /* Be paranoid, rather than too clever. */ 889 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { 890 struct sk_buff *skb2; 891 892 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); 893 if (!skb2) { 894 ret = -ENOMEM; 895 goto err; 896 } 897 if (skb->sk) 898 skb_set_owner_w(skb2, skb->sk); 899 900 consume_skb(skb); 901 skb = skb2; 902 } 903 904 rcu_read_lock_bh(); 905 906 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); 907 if (!IS_ERR(neigh)) { 908 sock_confirm_neigh(skb, neigh); 909 /* if crossing protocols, can not use the cached header */ 910 ret = neigh_output(neigh, skb, is_v6gw); 911 rcu_read_unlock_bh(); 912 return ret; 913 } 914 915 rcu_read_unlock_bh(); 916 err: 917 vrf_tx_error(skb->dev, skb); 918 return ret; 919 } 920 921 static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) 922 { 923 struct net_device *dev = skb_dst(skb)->dev; 924 925 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); 926 927 skb->dev = dev; 928 skb->protocol = htons(ETH_P_IP); 929 930 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, 931 net, sk, skb, NULL, dev, 932 vrf_finish_output, 933 !(IPCB(skb)->flags & IPSKB_REROUTED)); 934 } 935 936 /* set dst on skb to send packet to us via dev_xmit path. Allows 937 * packet to go through device based features such as qdisc, netfilter 938 * hooks and packet sockets with skb->dev set to vrf device. 939 */ 940 static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev, 941 struct sk_buff *skb) 942 { 943 struct net_vrf *vrf = netdev_priv(vrf_dev); 944 struct dst_entry *dst = NULL; 945 struct rtable *rth; 946 947 rcu_read_lock(); 948 949 rth = rcu_dereference(vrf->rth); 950 if (likely(rth)) { 951 dst = &rth->dst; 952 dst_hold(dst); 953 } 954 955 rcu_read_unlock(); 956 957 if (unlikely(!dst)) { 958 vrf_tx_error(vrf_dev, skb); 959 return NULL; 960 } 961 962 skb_dst_drop(skb); 963 skb_dst_set(skb, dst); 964 965 return skb; 966 } 967 968 static int vrf_output_direct(struct net *net, struct sock *sk, 969 struct sk_buff *skb) 970 { 971 skb->protocol = htons(ETH_P_IP); 972 973 return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, 974 net, sk, skb, NULL, skb->dev, 975 vrf_finish_direct, 976 !(IPCB(skb)->flags & IPSKB_REROUTED)); 977 } 978 979 static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev, 980 struct sock *sk, 981 struct sk_buff *skb) 982 { 983 struct net *net = dev_net(vrf_dev); 984 int err; 985 986 skb->dev = vrf_dev; 987 988 err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, 989 skb, NULL, vrf_dev, vrf_output_direct); 990 991 if (likely(err == 1)) 992 err = vrf_output_direct(net, sk, skb); 993 994 /* reset skb device */ 995 if (likely(err == 1)) 996 nf_reset_ct(skb); 997 else 998 skb = NULL; 999 1000 return skb; 1001 } 1002 1003 static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev, 1004 struct sock *sk, 1005 struct sk_buff *skb) 1006 { 1007 /* don't divert multicast or local broadcast */ 1008 if (ipv4_is_multicast(ip_hdr(skb)->daddr) || 1009 ipv4_is_lbcast(ip_hdr(skb)->daddr)) 1010 return skb; 1011 1012 if (qdisc_tx_is_default(vrf_dev) || 1013 IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) 1014 return vrf_ip_out_direct(vrf_dev, sk, skb); 1015 1016 return vrf_ip_out_redirect(vrf_dev, skb); 1017 } 1018 1019 /* called with rcu lock held */ 1020 static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, 1021 struct sock *sk, 1022 struct sk_buff *skb, 1023 u16 proto) 1024 { 1025 switch (proto) { 1026 case AF_INET: 1027 return vrf_ip_out(vrf_dev, sk, skb); 1028 case AF_INET6: 1029 return vrf_ip6_out(vrf_dev, sk, skb); 1030 } 1031 1032 return skb; 1033 } 1034 1035 /* holding rtnl */ 1036 static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) 1037 { 1038 struct rtable *rth = rtnl_dereference(vrf->rth); 1039 struct net *net = dev_net(dev); 1040 struct dst_entry *dst; 1041 1042 RCU_INIT_POINTER(vrf->rth, NULL); 1043 synchronize_rcu(); 1044 1045 /* move dev in dst's to loopback so this VRF device can be deleted 1046 * - based on dst_ifdown 1047 */ 1048 if (rth) { 1049 dst = &rth->dst; 1050 dev_put(dst->dev); 1051 dst->dev = net->loopback_dev; 1052 dev_hold(dst->dev); 1053 dst_release(dst); 1054 } 1055 } 1056 1057 static int vrf_rtable_create(struct net_device *dev) 1058 { 1059 struct net_vrf *vrf = netdev_priv(dev); 1060 struct rtable *rth; 1061 1062 if (!fib_new_table(dev_net(dev), vrf->tb_id)) 1063 return -ENOMEM; 1064 1065 /* create a dst for routing packets out through a VRF device */ 1066 rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1); 1067 if (!rth) 1068 return -ENOMEM; 1069 1070 rth->dst.output = vrf_output; 1071 1072 rcu_assign_pointer(vrf->rth, rth); 1073 1074 return 0; 1075 } 1076 1077 /**************************** device handling ********************/ 1078 1079 /* cycle interface to flush neighbor cache and move routes across tables */ 1080 static void cycle_netdev(struct net_device *dev, 1081 struct netlink_ext_ack *extack) 1082 { 1083 unsigned int flags = dev->flags; 1084 int ret; 1085 1086 if (!netif_running(dev)) 1087 return; 1088 1089 ret = dev_change_flags(dev, flags & ~IFF_UP, extack); 1090 if (ret >= 0) 1091 ret = dev_change_flags(dev, flags, extack); 1092 1093 if (ret < 0) { 1094 netdev_err(dev, 1095 "Failed to cycle device %s; route tables might be wrong!\n", 1096 dev->name); 1097 } 1098 } 1099 1100 static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, 1101 struct netlink_ext_ack *extack) 1102 { 1103 int ret; 1104 1105 /* do not allow loopback device to be enslaved to a VRF. 1106 * The vrf device acts as the loopback for the vrf. 1107 */ 1108 if (port_dev == dev_net(dev)->loopback_dev) { 1109 NL_SET_ERR_MSG(extack, 1110 "Can not enslave loopback device to a VRF"); 1111 return -EOPNOTSUPP; 1112 } 1113 1114 port_dev->priv_flags |= IFF_L3MDEV_SLAVE; 1115 ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack); 1116 if (ret < 0) 1117 goto err; 1118 1119 cycle_netdev(port_dev, extack); 1120 1121 return 0; 1122 1123 err: 1124 port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; 1125 return ret; 1126 } 1127 1128 static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev, 1129 struct netlink_ext_ack *extack) 1130 { 1131 if (netif_is_l3_master(port_dev)) { 1132 NL_SET_ERR_MSG(extack, 1133 "Can not enslave an L3 master device to a VRF"); 1134 return -EINVAL; 1135 } 1136 1137 if (netif_is_l3_slave(port_dev)) 1138 return -EINVAL; 1139 1140 return do_vrf_add_slave(dev, port_dev, extack); 1141 } 1142 1143 /* inverse of do_vrf_add_slave */ 1144 static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) 1145 { 1146 netdev_upper_dev_unlink(port_dev, dev); 1147 port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; 1148 1149 cycle_netdev(port_dev, NULL); 1150 1151 return 0; 1152 } 1153 1154 static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev) 1155 { 1156 return do_vrf_del_slave(dev, port_dev); 1157 } 1158 1159 static void vrf_dev_uninit(struct net_device *dev) 1160 { 1161 struct net_vrf *vrf = netdev_priv(dev); 1162 1163 vrf_rtable_release(dev, vrf); 1164 vrf_rt6_release(dev, vrf); 1165 1166 free_percpu(dev->dstats); 1167 dev->dstats = NULL; 1168 } 1169 1170 static int vrf_dev_init(struct net_device *dev) 1171 { 1172 struct net_vrf *vrf = netdev_priv(dev); 1173 1174 dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); 1175 if (!dev->dstats) 1176 goto out_nomem; 1177 1178 /* create the default dst which points back to us */ 1179 if (vrf_rtable_create(dev) != 0) 1180 goto out_stats; 1181 1182 if (vrf_rt6_create(dev) != 0) 1183 goto out_rth; 1184 1185 dev->flags = IFF_MASTER | IFF_NOARP; 1186 1187 /* MTU is irrelevant for VRF device; set to 64k similar to lo */ 1188 dev->mtu = 64 * 1024; 1189 1190 /* similarly, oper state is irrelevant; set to up to avoid confusion */ 1191 dev->operstate = IF_OPER_UP; 1192 netdev_lockdep_set_classes(dev); 1193 return 0; 1194 1195 out_rth: 1196 vrf_rtable_release(dev, vrf); 1197 out_stats: 1198 free_percpu(dev->dstats); 1199 dev->dstats = NULL; 1200 out_nomem: 1201 return -ENOMEM; 1202 } 1203 1204 static const struct net_device_ops vrf_netdev_ops = { 1205 .ndo_init = vrf_dev_init, 1206 .ndo_uninit = vrf_dev_uninit, 1207 .ndo_start_xmit = vrf_xmit, 1208 .ndo_set_mac_address = eth_mac_addr, 1209 .ndo_get_stats64 = vrf_get_stats64, 1210 .ndo_add_slave = vrf_add_slave, 1211 .ndo_del_slave = vrf_del_slave, 1212 }; 1213 1214 static u32 vrf_fib_table(const struct net_device *dev) 1215 { 1216 struct net_vrf *vrf = netdev_priv(dev); 1217 1218 return vrf->tb_id; 1219 } 1220 1221 static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) 1222 { 1223 kfree_skb(skb); 1224 return 0; 1225 } 1226 1227 static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, 1228 struct sk_buff *skb, 1229 struct net_device *dev) 1230 { 1231 struct net *net = dev_net(dev); 1232 1233 if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1) 1234 skb = NULL; /* kfree_skb(skb) handled by nf code */ 1235 1236 return skb; 1237 } 1238 1239 #if IS_ENABLED(CONFIG_IPV6) 1240 /* neighbor handling is done with actual device; do not want 1241 * to flip skb->dev for those ndisc packets. This really fails 1242 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is 1243 * a start. 1244 */ 1245 static bool ipv6_ndisc_frame(const struct sk_buff *skb) 1246 { 1247 const struct ipv6hdr *iph = ipv6_hdr(skb); 1248 bool rc = false; 1249 1250 if (iph->nexthdr == NEXTHDR_ICMP) { 1251 const struct icmp6hdr *icmph; 1252 struct icmp6hdr _icmph; 1253 1254 icmph = skb_header_pointer(skb, sizeof(*iph), 1255 sizeof(_icmph), &_icmph); 1256 if (!icmph) 1257 goto out; 1258 1259 switch (icmph->icmp6_type) { 1260 case NDISC_ROUTER_SOLICITATION: 1261 case NDISC_ROUTER_ADVERTISEMENT: 1262 case NDISC_NEIGHBOUR_SOLICITATION: 1263 case NDISC_NEIGHBOUR_ADVERTISEMENT: 1264 case NDISC_REDIRECT: 1265 rc = true; 1266 break; 1267 } 1268 } 1269 1270 out: 1271 return rc; 1272 } 1273 1274 static struct rt6_info *vrf_ip6_route_lookup(struct net *net, 1275 const struct net_device *dev, 1276 struct flowi6 *fl6, 1277 int ifindex, 1278 const struct sk_buff *skb, 1279 int flags) 1280 { 1281 struct net_vrf *vrf = netdev_priv(dev); 1282 1283 return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags); 1284 } 1285 1286 static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, 1287 int ifindex) 1288 { 1289 const struct ipv6hdr *iph = ipv6_hdr(skb); 1290 struct flowi6 fl6 = { 1291 .flowi6_iif = ifindex, 1292 .flowi6_mark = skb->mark, 1293 .flowi6_proto = iph->nexthdr, 1294 .daddr = iph->daddr, 1295 .saddr = iph->saddr, 1296 .flowlabel = ip6_flowinfo(iph), 1297 }; 1298 struct net *net = dev_net(vrf_dev); 1299 struct rt6_info *rt6; 1300 1301 rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb, 1302 RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); 1303 if (unlikely(!rt6)) 1304 return; 1305 1306 if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) 1307 return; 1308 1309 skb_dst_set(skb, &rt6->dst); 1310 } 1311 1312 static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, 1313 struct sk_buff *skb) 1314 { 1315 int orig_iif = skb->skb_iif; 1316 bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); 1317 bool is_ndisc = ipv6_ndisc_frame(skb); 1318 1319 /* loopback, multicast & non-ND link-local traffic; do not push through 1320 * packet taps again. Reset pkt_type for upper layers to process skb 1321 */ 1322 if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { 1323 skb->dev = vrf_dev; 1324 skb->skb_iif = vrf_dev->ifindex; 1325 IP6CB(skb)->flags |= IP6SKB_L3SLAVE; 1326 if (skb->pkt_type == PACKET_LOOPBACK) 1327 skb->pkt_type = PACKET_HOST; 1328 goto out; 1329 } 1330 1331 /* if packet is NDISC then keep the ingress interface */ 1332 if (!is_ndisc) { 1333 vrf_rx_stats(vrf_dev, skb->len); 1334 skb->dev = vrf_dev; 1335 skb->skb_iif = vrf_dev->ifindex; 1336 1337 if (!list_empty(&vrf_dev->ptype_all)) { 1338 skb_push(skb, skb->mac_len); 1339 dev_queue_xmit_nit(skb, vrf_dev); 1340 skb_pull(skb, skb->mac_len); 1341 } 1342 1343 IP6CB(skb)->flags |= IP6SKB_L3SLAVE; 1344 } 1345 1346 if (need_strict) 1347 vrf_ip6_input_dst(skb, vrf_dev, orig_iif); 1348 1349 skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev); 1350 out: 1351 return skb; 1352 } 1353 1354 #else 1355 static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, 1356 struct sk_buff *skb) 1357 { 1358 return skb; 1359 } 1360 #endif 1361 1362 static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, 1363 struct sk_buff *skb) 1364 { 1365 skb->dev = vrf_dev; 1366 skb->skb_iif = vrf_dev->ifindex; 1367 IPCB(skb)->flags |= IPSKB_L3SLAVE; 1368 1369 if (ipv4_is_multicast(ip_hdr(skb)->daddr)) 1370 goto out; 1371 1372 /* loopback traffic; do not push through packet taps again. 1373 * Reset pkt_type for upper layers to process skb 1374 */ 1375 if (skb->pkt_type == PACKET_LOOPBACK) { 1376 skb->pkt_type = PACKET_HOST; 1377 goto out; 1378 } 1379 1380 vrf_rx_stats(vrf_dev, skb->len); 1381 1382 if (!list_empty(&vrf_dev->ptype_all)) { 1383 skb_push(skb, skb->mac_len); 1384 dev_queue_xmit_nit(skb, vrf_dev); 1385 skb_pull(skb, skb->mac_len); 1386 } 1387 1388 skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); 1389 out: 1390 return skb; 1391 } 1392 1393 /* called with rcu lock held */ 1394 static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, 1395 struct sk_buff *skb, 1396 u16 proto) 1397 { 1398 switch (proto) { 1399 case AF_INET: 1400 return vrf_ip_rcv(vrf_dev, skb); 1401 case AF_INET6: 1402 return vrf_ip6_rcv(vrf_dev, skb); 1403 } 1404 1405 return skb; 1406 } 1407 1408 #if IS_ENABLED(CONFIG_IPV6) 1409 /* send to link-local or multicast address via interface enslaved to 1410 * VRF device. Force lookup to VRF table without changing flow struct 1411 * Note: Caller to this function must hold rcu_read_lock() and no refcnt 1412 * is taken on the dst by this function. 1413 */ 1414 static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev, 1415 struct flowi6 *fl6) 1416 { 1417 struct net *net = dev_net(dev); 1418 int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF; 1419 struct dst_entry *dst = NULL; 1420 struct rt6_info *rt; 1421 1422 /* VRF device does not have a link-local address and 1423 * sending packets to link-local or mcast addresses over 1424 * a VRF device does not make sense 1425 */ 1426 if (fl6->flowi6_oif == dev->ifindex) { 1427 dst = &net->ipv6.ip6_null_entry->dst; 1428 return dst; 1429 } 1430 1431 if (!ipv6_addr_any(&fl6->saddr)) 1432 flags |= RT6_LOOKUP_F_HAS_SADDR; 1433 1434 rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags); 1435 if (rt) 1436 dst = &rt->dst; 1437 1438 return dst; 1439 } 1440 #endif 1441 1442 static const struct l3mdev_ops vrf_l3mdev_ops = { 1443 .l3mdev_fib_table = vrf_fib_table, 1444 .l3mdev_l3_rcv = vrf_l3_rcv, 1445 .l3mdev_l3_out = vrf_l3_out, 1446 #if IS_ENABLED(CONFIG_IPV6) 1447 .l3mdev_link_scope_lookup = vrf_link_scope_lookup, 1448 #endif 1449 }; 1450 1451 static void vrf_get_drvinfo(struct net_device *dev, 1452 struct ethtool_drvinfo *info) 1453 { 1454 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1455 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1456 } 1457 1458 static const struct ethtool_ops vrf_ethtool_ops = { 1459 .get_drvinfo = vrf_get_drvinfo, 1460 }; 1461 1462 static inline size_t vrf_fib_rule_nl_size(void) 1463 { 1464 size_t sz; 1465 1466 sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); 1467 sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ 1468 sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ 1469 sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */ 1470 1471 return sz; 1472 } 1473 1474 static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) 1475 { 1476 struct fib_rule_hdr *frh; 1477 struct nlmsghdr *nlh; 1478 struct sk_buff *skb; 1479 int err; 1480 1481 if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) && 1482 !ipv6_mod_enabled()) 1483 return 0; 1484 1485 skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); 1486 if (!skb) 1487 return -ENOMEM; 1488 1489 nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0); 1490 if (!nlh) 1491 goto nla_put_failure; 1492 1493 /* rule only needs to appear once */ 1494 nlh->nlmsg_flags |= NLM_F_EXCL; 1495 1496 frh = nlmsg_data(nlh); 1497 memset(frh, 0, sizeof(*frh)); 1498 frh->family = family; 1499 frh->action = FR_ACT_TO_TBL; 1500 1501 if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL)) 1502 goto nla_put_failure; 1503 1504 if (nla_put_u8(skb, FRA_L3MDEV, 1)) 1505 goto nla_put_failure; 1506 1507 if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF)) 1508 goto nla_put_failure; 1509 1510 nlmsg_end(skb, nlh); 1511 1512 /* fib_nl_{new,del}rule handling looks for net from skb->sk */ 1513 skb->sk = dev_net(dev)->rtnl; 1514 if (add_it) { 1515 err = fib_nl_newrule(skb, nlh, NULL); 1516 if (err == -EEXIST) 1517 err = 0; 1518 } else { 1519 err = fib_nl_delrule(skb, nlh, NULL); 1520 if (err == -ENOENT) 1521 err = 0; 1522 } 1523 nlmsg_free(skb); 1524 1525 return err; 1526 1527 nla_put_failure: 1528 nlmsg_free(skb); 1529 1530 return -EMSGSIZE; 1531 } 1532 1533 static int vrf_add_fib_rules(const struct net_device *dev) 1534 { 1535 int err; 1536 1537 err = vrf_fib_rule(dev, AF_INET, true); 1538 if (err < 0) 1539 goto out_err; 1540 1541 err = vrf_fib_rule(dev, AF_INET6, true); 1542 if (err < 0) 1543 goto ipv6_err; 1544 1545 #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) 1546 err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true); 1547 if (err < 0) 1548 goto ipmr_err; 1549 #endif 1550 1551 #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) 1552 err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true); 1553 if (err < 0) 1554 goto ip6mr_err; 1555 #endif 1556 1557 return 0; 1558 1559 #if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES) 1560 ip6mr_err: 1561 vrf_fib_rule(dev, RTNL_FAMILY_IPMR, false); 1562 #endif 1563 1564 #if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES) 1565 ipmr_err: 1566 vrf_fib_rule(dev, AF_INET6, false); 1567 #endif 1568 1569 ipv6_err: 1570 vrf_fib_rule(dev, AF_INET, false); 1571 1572 out_err: 1573 netdev_err(dev, "Failed to add FIB rules.\n"); 1574 return err; 1575 } 1576 1577 static void vrf_setup(struct net_device *dev) 1578 { 1579 ether_setup(dev); 1580 1581 /* Initialize the device structure. */ 1582 dev->netdev_ops = &vrf_netdev_ops; 1583 dev->l3mdev_ops = &vrf_l3mdev_ops; 1584 dev->ethtool_ops = &vrf_ethtool_ops; 1585 dev->needs_free_netdev = true; 1586 1587 /* Fill in device structure with ethernet-generic values. */ 1588 eth_hw_addr_random(dev); 1589 1590 /* don't acquire vrf device's netif_tx_lock when transmitting */ 1591 dev->features |= NETIF_F_LLTX; 1592 1593 /* don't allow vrf devices to change network namespaces. */ 1594 dev->features |= NETIF_F_NETNS_LOCAL; 1595 1596 /* does not make sense for a VLAN to be added to a vrf device */ 1597 dev->features |= NETIF_F_VLAN_CHALLENGED; 1598 1599 /* enable offload features */ 1600 dev->features |= NETIF_F_GSO_SOFTWARE; 1601 dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; 1602 dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; 1603 1604 dev->hw_features = dev->features; 1605 dev->hw_enc_features = dev->features; 1606 1607 /* default to no qdisc; user can add if desired */ 1608 dev->priv_flags |= IFF_NO_QUEUE; 1609 dev->priv_flags |= IFF_NO_RX_HANDLER; 1610 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1611 1612 /* VRF devices do not care about MTU, but if the MTU is set 1613 * too low then the ipv4 and ipv6 protocols are disabled 1614 * which breaks networking. 1615 */ 1616 dev->min_mtu = IPV6_MIN_MTU; 1617 dev->max_mtu = ETH_MAX_MTU; 1618 } 1619 1620 static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], 1621 struct netlink_ext_ack *extack) 1622 { 1623 if (tb[IFLA_ADDRESS]) { 1624 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { 1625 NL_SET_ERR_MSG(extack, "Invalid hardware address"); 1626 return -EINVAL; 1627 } 1628 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { 1629 NL_SET_ERR_MSG(extack, "Invalid hardware address"); 1630 return -EADDRNOTAVAIL; 1631 } 1632 } 1633 return 0; 1634 } 1635 1636 static void vrf_dellink(struct net_device *dev, struct list_head *head) 1637 { 1638 struct net_device *port_dev; 1639 struct list_head *iter; 1640 1641 netdev_for_each_lower_dev(dev, port_dev, iter) 1642 vrf_del_slave(dev, port_dev); 1643 1644 vrf_map_unregister_dev(dev); 1645 1646 unregister_netdevice_queue(dev, head); 1647 } 1648 1649 static int vrf_newlink(struct net *src_net, struct net_device *dev, 1650 struct nlattr *tb[], struct nlattr *data[], 1651 struct netlink_ext_ack *extack) 1652 { 1653 struct net_vrf *vrf = netdev_priv(dev); 1654 struct netns_vrf *nn_vrf; 1655 bool *add_fib_rules; 1656 struct net *net; 1657 int err; 1658 1659 if (!data || !data[IFLA_VRF_TABLE]) { 1660 NL_SET_ERR_MSG(extack, "VRF table id is missing"); 1661 return -EINVAL; 1662 } 1663 1664 vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); 1665 if (vrf->tb_id == RT_TABLE_UNSPEC) { 1666 NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], 1667 "Invalid VRF table id"); 1668 return -EINVAL; 1669 } 1670 1671 dev->priv_flags |= IFF_L3MDEV_MASTER; 1672 1673 err = register_netdevice(dev); 1674 if (err) 1675 goto out; 1676 1677 /* mapping between table_id and vrf; 1678 * note: such binding could not be done in the dev init function 1679 * because dev->ifindex id is not available yet. 1680 */ 1681 vrf->ifindex = dev->ifindex; 1682 1683 err = vrf_map_register_dev(dev, extack); 1684 if (err) { 1685 unregister_netdevice(dev); 1686 goto out; 1687 } 1688 1689 net = dev_net(dev); 1690 nn_vrf = net_generic(net, vrf_net_id); 1691 1692 add_fib_rules = &nn_vrf->add_fib_rules; 1693 if (*add_fib_rules) { 1694 err = vrf_add_fib_rules(dev); 1695 if (err) { 1696 vrf_map_unregister_dev(dev); 1697 unregister_netdevice(dev); 1698 goto out; 1699 } 1700 *add_fib_rules = false; 1701 } 1702 1703 out: 1704 return err; 1705 } 1706 1707 static size_t vrf_nl_getsize(const struct net_device *dev) 1708 { 1709 return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */ 1710 } 1711 1712 static int vrf_fillinfo(struct sk_buff *skb, 1713 const struct net_device *dev) 1714 { 1715 struct net_vrf *vrf = netdev_priv(dev); 1716 1717 return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); 1718 } 1719 1720 static size_t vrf_get_slave_size(const struct net_device *bond_dev, 1721 const struct net_device *slave_dev) 1722 { 1723 return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */ 1724 } 1725 1726 static int vrf_fill_slave_info(struct sk_buff *skb, 1727 const struct net_device *vrf_dev, 1728 const struct net_device *slave_dev) 1729 { 1730 struct net_vrf *vrf = netdev_priv(vrf_dev); 1731 1732 if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id)) 1733 return -EMSGSIZE; 1734 1735 return 0; 1736 } 1737 1738 static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { 1739 [IFLA_VRF_TABLE] = { .type = NLA_U32 }, 1740 }; 1741 1742 static struct rtnl_link_ops vrf_link_ops __read_mostly = { 1743 .kind = DRV_NAME, 1744 .priv_size = sizeof(struct net_vrf), 1745 1746 .get_size = vrf_nl_getsize, 1747 .policy = vrf_nl_policy, 1748 .validate = vrf_validate, 1749 .fill_info = vrf_fillinfo, 1750 1751 .get_slave_size = vrf_get_slave_size, 1752 .fill_slave_info = vrf_fill_slave_info, 1753 1754 .newlink = vrf_newlink, 1755 .dellink = vrf_dellink, 1756 .setup = vrf_setup, 1757 .maxtype = IFLA_VRF_MAX, 1758 }; 1759 1760 static int vrf_device_event(struct notifier_block *unused, 1761 unsigned long event, void *ptr) 1762 { 1763 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1764 1765 /* only care about unregister events to drop slave references */ 1766 if (event == NETDEV_UNREGISTER) { 1767 struct net_device *vrf_dev; 1768 1769 if (!netif_is_l3_slave(dev)) 1770 goto out; 1771 1772 vrf_dev = netdev_master_upper_dev_get(dev); 1773 vrf_del_slave(vrf_dev, dev); 1774 } 1775 out: 1776 return NOTIFY_DONE; 1777 } 1778 1779 static struct notifier_block vrf_notifier_block __read_mostly = { 1780 .notifier_call = vrf_device_event, 1781 }; 1782 1783 static int vrf_map_init(struct vrf_map *vmap) 1784 { 1785 spin_lock_init(&vmap->vmap_lock); 1786 hash_init(vmap->ht); 1787 1788 vmap->strict_mode = false; 1789 1790 return 0; 1791 } 1792 1793 static int vrf_shared_table_handler(struct ctl_table *table, int write, 1794 void *buffer, size_t *lenp, loff_t *ppos) 1795 { 1796 struct net *net = (struct net *)table->extra1; 1797 struct vrf_map *vmap = netns_vrf_map(net); 1798 int proc_strict_mode = 0; 1799 struct ctl_table tmp = { 1800 .procname = table->procname, 1801 .data = &proc_strict_mode, 1802 .maxlen = sizeof(int), 1803 .mode = table->mode, 1804 .extra1 = SYSCTL_ZERO, 1805 .extra2 = SYSCTL_ONE, 1806 }; 1807 int ret; 1808 1809 if (!write) 1810 proc_strict_mode = vrf_strict_mode(vmap); 1811 1812 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); 1813 1814 if (write && ret == 0) 1815 ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode); 1816 1817 return ret; 1818 } 1819 1820 static const struct ctl_table vrf_table[] = { 1821 { 1822 .procname = "strict_mode", 1823 .data = NULL, 1824 .maxlen = sizeof(int), 1825 .mode = 0644, 1826 .proc_handler = vrf_shared_table_handler, 1827 /* set by the vrf_netns_init */ 1828 .extra1 = NULL, 1829 }, 1830 { }, 1831 }; 1832 1833 /* Initialize per network namespace state */ 1834 static int __net_init vrf_netns_init(struct net *net) 1835 { 1836 struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); 1837 struct ctl_table *table; 1838 int res; 1839 1840 nn_vrf->add_fib_rules = true; 1841 vrf_map_init(&nn_vrf->vmap); 1842 1843 table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL); 1844 if (!table) 1845 return -ENOMEM; 1846 1847 /* init the extra1 parameter with the reference to current netns */ 1848 table[0].extra1 = net; 1849 1850 nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table); 1851 if (!nn_vrf->ctl_hdr) { 1852 res = -ENOMEM; 1853 goto free_table; 1854 } 1855 1856 return 0; 1857 1858 free_table: 1859 kfree(table); 1860 1861 return res; 1862 } 1863 1864 static void __net_exit vrf_netns_exit(struct net *net) 1865 { 1866 struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); 1867 struct ctl_table *table; 1868 1869 table = nn_vrf->ctl_hdr->ctl_table_arg; 1870 unregister_net_sysctl_table(nn_vrf->ctl_hdr); 1871 kfree(table); 1872 } 1873 1874 static struct pernet_operations vrf_net_ops __net_initdata = { 1875 .init = vrf_netns_init, 1876 .exit = vrf_netns_exit, 1877 .id = &vrf_net_id, 1878 .size = sizeof(struct netns_vrf), 1879 }; 1880 1881 static int __init vrf_init_module(void) 1882 { 1883 int rc; 1884 1885 register_netdevice_notifier(&vrf_notifier_block); 1886 1887 rc = register_pernet_subsys(&vrf_net_ops); 1888 if (rc < 0) 1889 goto error; 1890 1891 rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF, 1892 vrf_ifindex_lookup_by_table_id); 1893 if (rc < 0) 1894 goto unreg_pernet; 1895 1896 rc = rtnl_link_register(&vrf_link_ops); 1897 if (rc < 0) 1898 goto table_lookup_unreg; 1899 1900 return 0; 1901 1902 table_lookup_unreg: 1903 l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF, 1904 vrf_ifindex_lookup_by_table_id); 1905 1906 unreg_pernet: 1907 unregister_pernet_subsys(&vrf_net_ops); 1908 1909 error: 1910 unregister_netdevice_notifier(&vrf_notifier_block); 1911 return rc; 1912 } 1913 1914 module_init(vrf_init_module); 1915 MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern"); 1916 MODULE_DESCRIPTION("Device driver to instantiate VRF domains"); 1917 MODULE_LICENSE("GPL"); 1918 MODULE_ALIAS_RTNL_LINK(DRV_NAME); 1919 MODULE_VERSION(DRV_VERSION); 1920