// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/sch_generic.h>
#include <net/netns/generic.h>
#include <net/netfilter/nf_conntrack.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.1"

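/* Default preference for the l3mdev FIB rules installed once per netns
 * when the first VRF is created (see vrf_add_fib_rules()). With these
 * rules in place, "ip rule" typically shows an entry such as:
 *
 *   1000:	from all lookup [l3mdev-table]
 */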
#define FIB_RULE_PREF  1000       /* default preference for FIB rules */

#define HT_MAP_BITS	4
#define HASH_INITVAL	((u32)0xcafef00d)

struct  vrf_map {
	DECLARE_HASHTABLE(ht, HT_MAP_BITS);
	spinlock_t vmap_lock;

	/* shared_tables:
	 * count of distinct tables that do not comply with the strict mode
	 * requirement.
	 * shared_tables must be 0 in order to enable strict mode.
	 *
	 * example of the evolution of shared_tables:
	 *                                                        | time
	 * add  vrf0 --> table 100        shared_tables = 0       | t0
	 * add  vrf1 --> table 101        shared_tables = 0       | t1
	 * add  vrf2 --> table 100        shared_tables = 1       | t2
	 * add  vrf3 --> table 100        shared_tables = 1       | t3
	 * add  vrf4 --> table 101        shared_tables = 2       v t4
	 *
	 * shared_tables is a "step function" (or "staircase function")
	 * and it is increased by one when the second vrf is associated
	 * with a table.
	 *
	 * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1.
	 *
	 * at t3, another dev (vrf3) is bound to the same table 100 but the
	 * value of shared_tables is still 1.
	 * This means that no matter how many more vrfs register on
	 * table 100, shared_tables will not increase (considering only
	 * table 100).
	 *
	 * at t4, vrf4 is bound to table 101, and shared_tables = 2.
	 *
	 * Looking at the value of shared_tables we can immediately know if
	 * strict_mode can or cannot be enforced. Indeed, strict_mode
	 * can be enforced iff shared_tables = 0.
	 *
	 * Conversely, shared_tables is decreased when a vrf is de-associated
	 * from a table with exactly two associated vrfs.
	 */
	u32 shared_tables;

	bool strict_mode;
};

struct vrf_map_elem {
	struct hlist_node hnode;
	struct list_head vrf_list;  /* VRFs registered to this table */

	u32 table_id;
	int users;
	int ifindex;
};

static unsigned int vrf_net_id;

/* per netns vrf data */
struct netns_vrf {
	/* protected by rtnl lock */
	bool add_fib_rules;

	struct vrf_map vmap;
	struct ctl_table_header	*ctl_hdr;
};

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32                     tb_id;

	struct list_head	me_list;   /* entry in vrf_map_elem */
	int			ifindex;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

static struct vrf_map *netns_vrf_map(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	return &nn_vrf->vmap;
}

static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
{
	return netns_vrf_map(dev_net(dev));
}

static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
{
	struct list_head *me_head = &me->vrf_list;
	struct net_vrf *vrf;

	if (list_empty(me_head))
		return -ENODEV;

	vrf = list_first_entry(me_head, struct net_vrf, me_list);

	return vrf->ifindex;
}

static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
{
	struct vrf_map_elem *me;

	me = kmalloc(sizeof(*me), flags);
	if (!me)
		return NULL;

	return me;
}

static void vrf_map_elem_free(struct vrf_map_elem *me)
{
	kfree(me);
}

static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
			      int ifindex, int users)
{
	me->table_id = table_id;
	me->ifindex = ifindex;
	me->users = users;
	INIT_LIST_HEAD(&me->vrf_list);
}

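/* map elements are keyed by the jhash of the table id; a lookup scans
 * the matching hash bucket for an exact table_id.
 */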
static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
						u32 table_id)
{
	struct vrf_map_elem *me;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_for_each_possible(vmap->ht, me, hnode, key) {
		if (me->table_id == table_id)
			return me;
	}

	return NULL;
}

static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
{
	u32 table_id = me->table_id;
	u32 key;

	key = jhash_1word(table_id, HASH_INITVAL);
	hash_add(vmap->ht, &me->hnode, key);
}

static void vrf_map_del_elem(struct vrf_map_elem *me)
{
	hash_del(&me->hnode);
}

static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
{
	spin_lock(&vmap->vmap_lock);
}

static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
{
	spin_unlock(&vmap->vmap_lock);
}

/* called with rtnl lock held */
static int
vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	struct vrf_map_elem *new_me, *me;
	u32 table_id = vrf->tb_id;
	bool free_new_me = false;
	int users;
	int res;

	/* we pre-allocate elements used in the spin-locked section (so that
	 * we hold the spinlock for as short a time as possible).
	 */
	new_me = vrf_map_elem_alloc(GFP_KERNEL);
	if (!new_me)
		return -ENOMEM;

	vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		me = new_me;
		vrf_map_add_elem(vmap, me);
		goto link_vrf;
	}

	/* we already have an entry in the vrf_map, which means at least one
	 * vrf is already registered on this table.
	 */
	free_new_me = true;
	if (vmap->strict_mode) {
		/* vrfs cannot share the same table */
		NL_SET_ERR_MSG(extack, "Table is used by another VRF");
		res = -EBUSY;
		goto unlock;
	}

link_vrf:
	users = ++me->users;
	if (users == 2)
		++vmap->shared_tables;

	list_add(&vrf->me_list, &me->vrf_list);

	res = 0;

unlock:
	vrf_map_unlock(vmap);

	/* clean-up, if needed */
	if (free_new_me)
		vrf_map_elem_free(new_me);

	return res;
}

/* called with rtnl lock held */
static void vrf_map_unregister_dev(struct net_device *dev)
{
	struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
	struct net_vrf *vrf = netdev_priv(dev);
	u32 table_id = vrf->tb_id;
	struct vrf_map_elem *me;
	int users;

	vrf_map_lock(vmap);

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me)
		goto unlock;

	list_del(&vrf->me_list);

	users = --me->users;
	if (users == 1) {
		--vmap->shared_tables;
	} else if (users == 0) {
		vrf_map_del_elem(me);

		/* no one will refer to this element anymore */
		vrf_map_elem_free(me);
	}

unlock:
	vrf_map_unlock(vmap);
}

/* return the vrf device index associated with the table_id */
static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
{
	struct vrf_map *vmap = netns_vrf_map(net);
	struct vrf_map_elem *me;
	int ifindex;

	vrf_map_lock(vmap);

	if (!vmap->strict_mode) {
		ifindex = -EPERM;
		goto unlock;
	}

	me = vrf_map_lookup_elem(vmap, table_id);
	if (!me) {
		ifindex = -ENODEV;
		goto unlock;
	}

	ifindex = vrf_map_elem_get_vrf_ifindex(me);

unlock:
	vrf_map_unlock(vmap);

	return ifindex;
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

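/* skbs diverted through the VRF device are marked conntrack-untracked
 * (if no conntrack state is attached yet); vrf_nf_reset_ct() undoes
 * only that mark before the skb re-enters the stack's output hooks.
 */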
static void vrf_nf_set_untracked(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == 0)
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}

static void vrf_nf_reset_ct(struct sk_buff *skb)
{
	if (skb_get_nfct(skb) == IP_CT_UNTRACKED)
		nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph;
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6;
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
		goto err;

	iph = ipv6_hdr(skb);

	memset(&fl6, 0, sizeof(fl6));
	/* needed to match OIF rule */
	fl6.flowi6_l3mdev = dev->ifindex;
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_mark = skb->mark;
	fl6.flowi6_proto = iph->nexthdr;

	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
	if (IS_ERR(dst) || dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst->dev is the VRF device again, this is locally originated
	 * traffic destined to a local address. Short-circuit to the Rx path.
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; we can't use it because the dst is switched to
 * point back to us
 */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	vrf_nf_reset_ct(skb);

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h;
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4;
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);

	memset(&fl4, 0, sizeof(fl4));
	/* needed to match OIF rule */
	fl4.flowi4_l3mdev = vrf_dev->ifindex;
	fl4.flowi4_iif = LOOPBACK_IFINDEX;
	fl4.flowi4_tos = RT_TOS(ip4h->tos);
	fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
	fl4.flowi4_proto = ip4h->protocol;
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst->dev is the VRF device again, this is locally originated
	 * traffic destined to a local address. Short-circuit to the Rx path.
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

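/* ndo_start_xmit handler: the skb is routed in the VRF's table and
 * accounted in the per-cpu dstats on success, or in tx_drps on drop.
 */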
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

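/* give taps (e.g. packet sockets) on the VRF device a chance to see the
 * frame: push a synthetic ethernet header so the frame parses cleanly,
 * deliver it to the taps, then strip the header again.
 */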
static void vrf_finish_direct(struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	vrf_nf_reset_ct(skb);
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	const struct in6_addr *nexthop;
	struct neighbour *neigh;
	int ret;

	vrf_nf_reset_ct(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip6_local_out(net, sk, skb);
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IPV6);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output6_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	int err;

	err = vrf_output6_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip6_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

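/* l3mdev_l3_out entry point for IPv6 locally originated traffic:
 * link-scope packets are left alone; everything else is either sent
 * straight out (default qdisc) or redirected via the VRF dst.
 */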
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dsts to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		netdev_ref_replace(dst->dev, net->loopback_dev,
				   &dst->dev_tracker, GFP_KERNEL);
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	vrf_nf_reset_ct(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		skb = skb_expand_head(skb, hh_len);
		if (!skb) {
			dev->stats.tx_errors++;
			return -ENOMEM;
		}
	}

	rcu_read_lock();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int ret;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock();
		return ret;
	}

	rcu_read_unlock();
	vrf_tx_error(skb->dev, skb);
	return -EINVAL;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	vrf_finish_direct(skb);

	return vrf_ip_local_out(net, sk, skb);
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err = 1;

	skb->protocol = htons(ETH_P_IP);

	if (!(IPCB(skb)->flags & IPSKB_REROUTED))
		err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb->dev, vrf_output_direct_finish);

	if (likely(err == 1))
		vrf_finish_direct(skb);

	return err;
}

static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	int err;

	err = vrf_output_direct(net, sk, skb);
	if (likely(err == 1))
		err = vrf_ip_local_out(net, sk, skb);

	return err;
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_ip_out_direct_finish);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	if (likely(err == 1))
		return skb;

	return NULL;
}

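/* l3mdev_l3_out entry point for IPv4 locally originated traffic;
 * multicast and local broadcast keep their original egress device.
 */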
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	vrf_nf_set_untracked(skb);

	if (qdisc_tx_is_default(vrf_dev) ||
	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dsts to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		netdev_ref_replace(dst->dev, net->loopback_dev,
				   &dst->dev_tracker, GFP_KERNEL);
		dst->dev = net->loopback_dev;
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* oper state is irrelevant for an L3 master device; set to up to
	 * avoid confusion
	 */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;    /* kfree_skb(skb) handled by nf code */

	return skb;
}

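/* build a dummy ethernet header for L3-only skbs, using the VRF MAC as
 * both source and destination, so taps on the VRF device can parse the
 * frame; the pushed bytes are folded into the checksum and then pulled
 * again.
 */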
static int vrf_prepare_mac_header(struct sk_buff *skb,
				  struct net_device *vrf_dev, u16 proto)
{
	struct ethhdr *eth;
	int err;

	/* in general, we do not know if there is enough space in the head of
	 * the packet for hosting the mac header.
	 */
	err = skb_cow_head(skb, LL_RESERVED_SPACE(vrf_dev));
	if (unlikely(err))
		/* no space in the skb head */
		return -ENOBUFS;

	__skb_push(skb, ETH_HLEN);
	eth = (struct ethhdr *)skb->data;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* we set the ethernet destination and the source addresses to the
	 * address of the VRF device.
	 */
	ether_addr_copy(eth->h_dest, vrf_dev->dev_addr);
	ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
	eth->h_proto = htons(proto);

	/* the destination address of the Ethernet frame corresponds to the
	 * address set on the VRF interface; therefore, the packet is intended
	 * to be processed locally.
	 */
	skb->protocol = eth->h_proto;
	skb->pkt_type = PACKET_HOST;

	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);

	skb_pull_inline(skb, ETH_HLEN);

	return 0;
}

/* prepare and add the mac header to the packet if it was not set previously.
 * In this way, packet sniffers such as tcpdump can parse the packet correctly.
 * If the mac header was already set, the original mac header is left
 * untouched and the function returns immediately.
 */
static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
				       struct net_device *vrf_dev,
				       u16 proto, struct net_device *orig_dev)
{
	if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
		return 0;

	return vrf_prepare_mac_header(skb, vrf_dev, proto);
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with the actual device; we do not want to
 * flip skb->dev for those ndisc packets. This really fails for multiple
 * next protocols (e.g., NEXTHDR_HOP). But it is a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

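/* route the skb in the VRF's FIB table using the original ingress
 * ifindex and attach the result as the skb's dst.
 */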
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif     = ifindex,
		.flowi6_mark    = skb->mark,
		.flowi6_proto   = iph->nexthdr,
		.daddr          = iph->daddr,
		.saddr          = iph->saddr,
		.flowlabel      = ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb.
	 * For non-loopback strict packets, determine the dst using the original
	 * ifindex.
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;

		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		else
			vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		struct net_device *orig_dev = skb->dev;

		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			int err;

			err = vrf_add_mac_header_if_unset(skb, vrf_dev,
							  ETH_P_IPV6,
							  orig_dev);
			if (likely(!err)) {
				skb_push(skb, skb->mac_len);
				dev_queue_xmit_nit(skb, vrf_dev);
				skb_pull(skb, skb->mac_len);
			}
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	struct net_device *orig_dev = skb->dev;

	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		int err;

		err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
						  orig_dev);
		if (likely(!err)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to a link-local or multicast address via an interface enslaved to
 * the VRF device. Force the lookup to the VRF table without changing the
 * flow struct.
 * Note: the caller must hold rcu_read_lock(); no refcnt is taken on the
 * dst by this function.
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_DST_NOREF;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));       /* FRA_PROTOCOL */

	return sz;
}

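/* add or remove the FRA_L3MDEV rule for the given family by feeding a
 * locally built netlink message to fib_nl_{new,del}rule(), exactly as
 * if user space had requested it.
 */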
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
	    !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET,  true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR,  false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6,  false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET,  false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU;
	dev->mtu = dev->max_mtu;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	vrf_map_unregister_dev(dev);

	unregister_netdevice_queue(dev, head);
}

static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct netns_vrf *nn_vrf;
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	/* mapping between table_id and vrf;
	 * note: the binding cannot be done in the dev init function
	 * because dev->ifindex is not yet available.
	 */
	vrf->ifindex = dev->ifindex;

	err = vrf_map_register_dev(dev, extack);
	if (err) {
		unregister_netdevice(dev);
		goto out;
	}

	net = dev_net(dev);
	nn_vrf = net_generic(net, vrf_net_id);

	add_fib_rules = &nn_vrf->add_fib_rules;
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			vrf_map_unregister_dev(dev);
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int vrf_map_init(struct vrf_map *vmap)
{
	spin_lock_init(&vmap->vmap_lock);
	hash_init(vmap->ht);

	vmap->strict_mode = false;

	return 0;
}

#ifdef CONFIG_SYSCTL
static bool vrf_strict_mode(struct vrf_map *vmap)
{
	bool strict_mode;

	vrf_map_lock(vmap);
	strict_mode = vmap->strict_mode;
	vrf_map_unlock(vmap);

	return strict_mode;
}

static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
{
	bool *cur_mode;
	int res = 0;

	vrf_map_lock(vmap);

	cur_mode = &vmap->strict_mode;
	if (*cur_mode == new_mode)
		goto unlock;

	if (*cur_mode) {
		/* disable strict mode */
		*cur_mode = false;
	} else {
		if (vmap->shared_tables) {
			/* we cannot allow strict_mode because some vrfs
			 * share one or more tables.
			 */
			res = -EBUSY;
			goto unlock;
		}

		/* no tables are shared among vrfs, so we can go back
		 * to a 1:1 association between a vrf and its table.
		 */
		*cur_mode = true;
	}

unlock:
	vrf_map_unlock(vmap);

	return res;
}

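/* proc handler for the per-netns strict-mode knob, e.g.:
 *
 *   sysctl -w net.vrf.strict_mode=1
 *
 * enabling succeeds only while no table is shared between VRFs.
 */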
static int vrf_shared_table_handler(struct ctl_table *table, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)table->extra1;
	struct vrf_map *vmap = netns_vrf_map(net);
	int proc_strict_mode = 0;
	struct ctl_table tmp = {
		.procname	= table->procname,
		.data		= &proc_strict_mode,
		.maxlen		= sizeof(int),
		.mode		= table->mode,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};
	int ret;

	if (!write)
		proc_strict_mode = vrf_strict_mode(vmap);

	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);

	if (write && ret == 0)
		ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);

	return ret;
}

static const struct ctl_table vrf_table[] = {
	{
		.procname	= "strict_mode",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= vrf_shared_table_handler,
		/* set by vrf_netns_init */
		.extra1		= NULL,
	},
	{ },
};

static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	struct ctl_table *table;

	table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* init the extra1 parameter with the reference to current netns */
	table[0].extra1 = net;

	nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table);
	if (!nn_vrf->ctl_hdr) {
		kfree(table);
		return -ENOMEM;
	}

	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
	struct ctl_table *table;

	table = nn_vrf->ctl_hdr->ctl_table_arg;
	unregister_net_sysctl_table(nn_vrf->ctl_hdr);
	kfree(table);
}
#else
static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
{
	return 0;
}

static void vrf_netns_exit_sysctl(struct net *net)
{
}
#endif

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);

	nn_vrf->add_fib_rules = true;
	vrf_map_init(&nn_vrf->vmap);

	return vrf_netns_init_sysctl(net, nn_vrf);
}

static void __net_exit vrf_netns_exit(struct net *net)
{
	vrf_netns_exit_sysctl(net);
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.exit = vrf_netns_exit,
	.id   = &vrf_net_id,
	.size = sizeof(struct netns_vrf),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
					  vrf_ifindex_lookup_by_table_id);
	if (rc < 0)
		goto unreg_pernet;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto table_lookup_unreg;

	return 0;

table_lookup_unreg:
	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
				       vrf_ifindex_lookup_by_table_id);

unreg_pernet:
	unregister_pernet_subsys(&vrf_net_ops);

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);