xref: /openbmc/linux/net/mpls/af_mpls.c (revision 232b0b08)
1 #include <linux/types.h>
2 #include <linux/skbuff.h>
3 #include <linux/socket.h>
4 #include <linux/sysctl.h>
5 #include <linux/net.h>
6 #include <linux/module.h>
7 #include <linux/if_arp.h>
8 #include <linux/ipv6.h>
9 #include <linux/mpls.h>
10 #include <linux/netconf.h>
11 #include <linux/vmalloc.h>
12 #include <linux/percpu.h>
13 #include <net/ip.h>
14 #include <net/dst.h>
15 #include <net/sock.h>
16 #include <net/arp.h>
17 #include <net/ip_fib.h>
18 #include <net/netevent.h>
19 #include <net/netns/generic.h>
20 #if IS_ENABLED(CONFIG_IPV6)
21 #include <net/ipv6.h>
22 #endif
23 #include <net/addrconf.h>
24 #include <net/nexthop.h>
25 #include "internal.h"
26 
27 /* Maximum number of labels to look ahead at when selecting a path of
28  * a multipath route
29  */
30 #define MAX_MP_SELECT_LABELS 4
31 
32 #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1)
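/* MPLS_NEIGH_TABLE_UNSPEC is one past the last real neighbour table and
 * marks nexthops for which no RTA_VIA attribute was supplied; mpls_forward()
 * sends such packets to the output device's own link-layer address.
 */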
33 
34 static int zero = 0;
35 static int label_limit = (1 << 20) - 1;
36 
37 static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
38 		       struct nlmsghdr *nlh, struct net *net, u32 portid,
39 		       unsigned int nlm_flags);
40 
41 static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
42 {
43 	struct mpls_route *rt = NULL;
44 
45 	if (index < net->mpls.platform_labels) {
46 		struct mpls_route __rcu **platform_label =
47 			rcu_dereference(net->mpls.platform_label);
48 		rt = rcu_dereference(platform_label[index]);
49 	}
50 	return rt;
51 }
52 
53 bool mpls_output_possible(const struct net_device *dev)
54 {
55 	return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
56 }
57 EXPORT_SYMBOL_GPL(mpls_output_possible);
58 
59 static u8 *__mpls_nh_via(struct mpls_route *rt, struct mpls_nh *nh)
60 {
61 	u8 *nh0_via = PTR_ALIGN((u8 *)&rt->rt_nh[rt->rt_nhn], VIA_ALEN_ALIGN);
62 	int nh_index = nh - rt->rt_nh;
63 
64 	return nh0_via + rt->rt_max_alen * nh_index;
65 }
66 
67 static const u8 *mpls_nh_via(const struct mpls_route *rt,
68 			     const struct mpls_nh *nh)
69 {
70 	return __mpls_nh_via((struct mpls_route *)rt, (struct mpls_nh *)nh);
71 }
72 
73 static unsigned int mpls_nh_header_size(const struct mpls_nh *nh)
74 {
75 	/* The size of the layer 2.5 labels to be added for this route */
76 	return nh->nh_labels * sizeof(struct mpls_shim_hdr);
77 }
78 
79 unsigned int mpls_dev_mtu(const struct net_device *dev)
80 {
81 	/* The amount of data the layer 2 frame can hold */
82 	return dev->mtu;
83 }
84 EXPORT_SYMBOL_GPL(mpls_dev_mtu);
85 
86 bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
87 {
88 	if (skb->len <= mtu)
89 		return false;
90 
91 	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
92 		return false;
93 
94 	return true;
95 }
96 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
97 
98 void mpls_stats_inc_outucastpkts(struct net_device *dev,
99 				 const struct sk_buff *skb)
100 {
101 	struct mpls_dev *mdev;
102 
103 	if (skb->protocol == htons(ETH_P_MPLS_UC)) {
104 		mdev = mpls_dev_get(dev);
105 		if (mdev)
106 			MPLS_INC_STATS_LEN(mdev, skb->len,
107 					   tx_packets,
108 					   tx_bytes);
109 	} else if (skb->protocol == htons(ETH_P_IP)) {
110 		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
111 #if IS_ENABLED(CONFIG_IPV6)
112 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
113 		struct inet6_dev *in6dev = __in6_dev_get(dev);
114 
115 		if (in6dev)
116 			IP6_UPD_PO_STATS(dev_net(dev), in6dev,
117 					 IPSTATS_MIB_OUT, skb->len);
118 #endif
119 	}
120 }
121 EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
122 
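/* Each MPLS shim header is one 32-bit word laid out per RFC 3032:
 * label (20 bits), traffic class (3 bits), bottom-of-stack flag (1 bit),
 * TTL (8 bits).  mpls_entry_decode() in internal.h unpacks such a word
 * into a struct mpls_entry_decoded, which is what the hash below keys on.
 */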
123 static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
124 {
125 	struct mpls_entry_decoded dec;
126 	unsigned int mpls_hdr_len = 0;
127 	struct mpls_shim_hdr *hdr;
128 	bool eli_seen = false;
129 	int label_index;
130 	u32 hash = 0;
131 
132 	for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
133 	     label_index++) {
134 		mpls_hdr_len += sizeof(*hdr);
135 		if (!pskb_may_pull(skb, mpls_hdr_len))
136 			break;
137 
138 		/* Read and decode the current label */
139 		hdr = mpls_hdr(skb) + label_index;
140 		dec = mpls_entry_decode(hdr);
141 
142 		/* RFC6790 - reserved labels MUST NOT be used as keys
143 		 * for the load-balancing function
144 		 */
145 		if (likely(dec.label >= MPLS_LABEL_FIRST_UNRESERVED)) {
146 			hash = jhash_1word(dec.label, hash);
147 
148 			/* The entropy label follows the entropy label
149 			 * indicator, so this means that the entropy
150 			 * label was just added to the hash - no need to
151 			 * go any deeper either in the label stack or in the
152 			 * payload
153 			 */
154 			if (eli_seen)
155 				break;
156 		} else if (dec.label == MPLS_LABEL_ENTROPY) {
157 			eli_seen = true;
158 		}
159 
160 		if (!dec.bos)
161 			continue;
162 
163 		/* found bottom label; does skb have room for an IPv4 header? */
164 		if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
165 			const struct iphdr *v4hdr;
166 
167 			v4hdr = (const struct iphdr *)(hdr + 1);
168 			if (v4hdr->version == 4) {
169 				hash = jhash_3words(ntohl(v4hdr->saddr),
170 						    ntohl(v4hdr->daddr),
171 						    v4hdr->protocol, hash);
172 			} else if (v4hdr->version == 6 &&
173 				   pskb_may_pull(skb, mpls_hdr_len +
174 						 sizeof(struct ipv6hdr))) {
175 				const struct ipv6hdr *v6hdr;
176 
177 				v6hdr = (const struct ipv6hdr *)(hdr + 1);
178 				hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
179 				hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
180 				hash = jhash_1word(v6hdr->nexthdr, hash);
181 			}
182 		}
183 
184 		break;
185 	}
186 
187 	return hash;
188 }
189 
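/* Pick one nexthop of a multipath route: hash the label stack (and, at the
 * bottom of the stack, the IPv4/IPv6 addresses and protocol of the payload),
 * then take hash % number-of-alive-nexthops, skipping nexthops marked dead
 * or linkdown.
 */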
190 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
191 					     struct sk_buff *skb)
192 {
193 	int alive = READ_ONCE(rt->rt_nhn_alive);
194 	u32 hash = 0;
195 	int nh_index = 0;
196 	int n = 0;
197 
198 	/* No need to look further into packet if there's only
199 	 * one path
200 	 */
201 	if (rt->rt_nhn == 1)
202 		goto out;
203 
204 	if (alive <= 0)
205 		return NULL;
206 
207 	hash = mpls_multipath_hash(rt, skb);
208 	nh_index = hash % alive;
209 	if (alive == rt->rt_nhn)
210 		goto out;
211 	for_nexthops(rt) {
212 		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
213 			continue;
214 		if (n == nh_index)
215 			return nh;
216 		n++;
217 	} endfor_nexthops(rt);
218 
219 out:
220 	return &rt->rt_nh[nh_index];
221 }
222 
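/* Called when the popped label was bottom-of-stack and no new labels are
 * pushed: restore skb->protocol to the payload protocol and copy the
 * decremented MPLS TTL into the IPv4 TTL (with an incremental checksum
 * fix-up) or the IPv6 hop limit.
 */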
223 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
224 			struct mpls_entry_decoded dec)
225 {
226 	enum mpls_payload_type payload_type;
227 	bool success = false;
228 
229 	/* The IPv4 code below accesses through the IPv4 header
230 	 * checksum, which is 12 bytes into the packet.
231 	 * The IPv6 code below accesses through the IPv6 hop limit
232 	 * which is 8 bytes into the packet.
233 	 *
234 	 * For all supported cases there should always be at least 12
235 	 * bytes of packet data present.  The IPv4 header is 20 bytes
236 	 * without options and the IPv6 header is always 40 bytes
237 	 * long.
238 	 */
239 	if (!pskb_may_pull(skb, 12))
240 		return false;
241 
242 	payload_type = rt->rt_payload_type;
243 	if (payload_type == MPT_UNSPEC)
244 		payload_type = ip_hdr(skb)->version;
245 
246 	switch (payload_type) {
247 	case MPT_IPV4: {
248 		struct iphdr *hdr4 = ip_hdr(skb);
249 		skb->protocol = htons(ETH_P_IP);
250 		csum_replace2(&hdr4->check,
251 			      htons(hdr4->ttl << 8),
252 			      htons(dec.ttl << 8));
253 		hdr4->ttl = dec.ttl;
254 		success = true;
255 		break;
256 	}
257 	case MPT_IPV6: {
258 		struct ipv6hdr *hdr6 = ipv6_hdr(skb);
259 		skb->protocol = htons(ETH_P_IPV6);
260 		hdr6->hop_limit = dec.ttl;
261 		success = true;
262 		break;
263 	}
264 	case MPT_UNSPEC:
265 		break;
266 	}
267 
268 	return success;
269 }
270 
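/* Receive handler for ETH_P_MPLS_UC packets (registered via
 * mpls_packet_type below).  Looks the incoming label up in the per-netns
 * platform_label table, decrements the TTL, pops the label and pushes the
 * nexthop's output labels, then hands the packet to neigh_xmit().
 */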
271 static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
272 			struct packet_type *pt, struct net_device *orig_dev)
273 {
274 	struct net *net = dev_net(dev);
275 	struct mpls_shim_hdr *hdr;
276 	struct mpls_route *rt;
277 	struct mpls_nh *nh;
278 	struct mpls_entry_decoded dec;
279 	struct net_device *out_dev;
280 	struct mpls_dev *out_mdev;
281 	struct mpls_dev *mdev;
282 	unsigned int hh_len;
283 	unsigned int new_header_size;
284 	unsigned int mtu;
285 	int err;
286 
287 	/* Careful: this entire function runs inside of an rcu critical section */
288 
289 	mdev = mpls_dev_get(dev);
290 	if (!mdev)
291 		goto drop;
292 
293 	MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
294 			   rx_bytes);
295 
296 	if (!mdev->input_enabled) {
297 		MPLS_INC_STATS(mdev, rx_dropped);
298 		goto drop;
299 	}
300 
301 	if (skb->pkt_type != PACKET_HOST)
302 		goto err;
303 
304 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
305 		goto err;
306 
307 	if (!pskb_may_pull(skb, sizeof(*hdr)))
308 		goto err;
309 
310 	/* Read and decode the label */
311 	hdr = mpls_hdr(skb);
312 	dec = mpls_entry_decode(hdr);
313 
314 	rt = mpls_route_input_rcu(net, dec.label);
315 	if (!rt) {
316 		MPLS_INC_STATS(mdev, rx_noroute);
317 		goto drop;
318 	}
319 
320 	nh = mpls_select_multipath(rt, skb);
321 	if (!nh)
322 		goto err;
323 
324 	/* Pop the label */
325 	skb_pull(skb, sizeof(*hdr));
326 	skb_reset_network_header(skb);
327 
328 	skb_orphan(skb);
329 
330 	if (skb_warn_if_lro(skb))
331 		goto err;
332 
333 	skb_forward_csum(skb);
334 
335 	/* Verify ttl is valid */
336 	if (dec.ttl <= 1)
337 		goto err;
338 	dec.ttl -= 1;
339 
340 	/* Find the output device */
341 	out_dev = rcu_dereference(nh->nh_dev);
342 	if (!mpls_output_possible(out_dev))
343 		goto tx_err;
344 
345 	/* Verify the destination can hold the packet */
346 	new_header_size = mpls_nh_header_size(nh);
347 	mtu = mpls_dev_mtu(out_dev);
348 	if (mpls_pkt_too_big(skb, mtu - new_header_size))
349 		goto tx_err;
350 
351 	hh_len = LL_RESERVED_SPACE(out_dev);
352 	if (!out_dev->header_ops)
353 		hh_len = 0;
354 
355 	/* Ensure there is enough space for the headers in the skb */
356 	if (skb_cow(skb, hh_len + new_header_size))
357 		goto tx_err;
358 
359 	skb->dev = out_dev;
360 	skb->protocol = htons(ETH_P_MPLS_UC);
361 
362 	if (unlikely(!new_header_size && dec.bos)) {
363 		/* Penultimate hop popping */
364 		if (!mpls_egress(rt, skb, dec))
365 			goto err;
366 	} else {
367 		bool bos;
368 		int i;
369 		skb_push(skb, new_header_size);
370 		skb_reset_network_header(skb);
371 		/* Push the new labels */
372 		hdr = mpls_hdr(skb);
373 		bos = dec.bos;
374 		for (i = nh->nh_labels - 1; i >= 0; i--) {
375 			hdr[i] = mpls_entry_encode(nh->nh_label[i],
376 						   dec.ttl, 0, bos);
377 			bos = false;
378 		}
379 	}
380 
381 	mpls_stats_inc_outucastpkts(out_dev, skb);
382 
383 	/* If via wasn't specified then send out using device address */
384 	if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
385 		err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
386 				 out_dev->dev_addr, skb);
387 	else
388 		err = neigh_xmit(nh->nh_via_table, out_dev,
389 				 mpls_nh_via(rt, nh), skb);
390 	if (err)
391 		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
392 				    __func__, err);
393 	return 0;
394 
395 tx_err:
396 	out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
397 	if (out_mdev)
398 		MPLS_INC_STATS(out_mdev, tx_errors);
399 	goto drop;
400 err:
401 	MPLS_INC_STATS(mdev, rx_errors);
402 drop:
403 	kfree_skb(skb);
404 	return NET_RX_DROP;
405 }
406 
407 static struct packet_type mpls_packet_type __read_mostly = {
408 	.type = cpu_to_be16(ETH_P_MPLS_UC),
409 	.func = mpls_forward,
410 };
411 
412 static const struct nla_policy rtm_mpls_policy[RTA_MAX+1] = {
413 	[RTA_DST]		= { .type = NLA_U32 },
414 	[RTA_OIF]		= { .type = NLA_U32 },
415 };
416 
417 struct mpls_route_config {
418 	u32			rc_protocol;
419 	u32			rc_ifindex;
420 	u8			rc_via_table;
421 	u8			rc_via_alen;
422 	u8			rc_via[MAX_VIA_ALEN];
423 	u32			rc_label;
424 	u8			rc_output_labels;
425 	u32			rc_output_label[MAX_NEW_LABELS];
426 	u32			rc_nlflags;
427 	enum mpls_payload_type	rc_payload_type;
428 	struct nl_info		rc_nlinfo;
429 	struct rtnexthop	*rc_mp;
430 	int			rc_mp_len;
431 };
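/* A hedged usage sketch (not part of this file): with iproute2 a config
 * like the one above typically originates from commands of the form
 *
 *	sysctl net.mpls.platform_labels=1024
 *	ip -f mpls route add 100 as 200 via inet 10.1.1.2 dev eth0
 *
 * which reach mpls_route_add() as RTM_NEWROUTE messages parsed by
 * rtm_to_route_config().  The label, address and device are examples only.
 */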
432 
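/* A route is a single allocation: struct mpls_route, then rt_nhn
 * struct mpls_nh entries, then (aligned to VIA_ALEN_ALIGN) one via
 * address slot of rt_max_alen bytes per nexthop.  __mpls_nh_via()
 * indexes into that trailing array.
 */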
433 static struct mpls_route *mpls_rt_alloc(int num_nh, u8 max_alen)
434 {
435 	u8 max_alen_aligned = ALIGN(max_alen, VIA_ALEN_ALIGN);
436 	struct mpls_route *rt;
437 
438 	rt = kzalloc(ALIGN(sizeof(*rt) + num_nh * sizeof(*rt->rt_nh),
439 			   VIA_ALEN_ALIGN) +
440 		     num_nh * max_alen_aligned,
441 		     GFP_KERNEL);
442 	if (rt) {
443 		rt->rt_nhn = num_nh;
444 		rt->rt_nhn_alive = num_nh;
445 		rt->rt_max_alen = max_alen_aligned;
446 	}
447 
448 	return rt;
449 }
450 
451 static void mpls_rt_free(struct mpls_route *rt)
452 {
453 	if (rt)
454 		kfree_rcu(rt, rt_rcu);
455 }
456 
457 static void mpls_notify_route(struct net *net, unsigned index,
458 			      struct mpls_route *old, struct mpls_route *new,
459 			      const struct nl_info *info)
460 {
461 	struct nlmsghdr *nlh = info ? info->nlh : NULL;
462 	unsigned portid = info ? info->portid : 0;
463 	int event = new ? RTM_NEWROUTE : RTM_DELROUTE;
464 	struct mpls_route *rt = new ? new : old;
465 	unsigned nlm_flags = (old && new) ? NLM_F_REPLACE : 0;
466 	/* Ignore reserved labels for now */
467 	if (rt && (index >= MPLS_LABEL_FIRST_UNRESERVED))
468 		rtmsg_lfib(event, index, rt, nlh, net, portid, nlm_flags);
469 }
470 
471 static void mpls_route_update(struct net *net, unsigned index,
472 			      struct mpls_route *new,
473 			      const struct nl_info *info)
474 {
475 	struct mpls_route __rcu **platform_label;
476 	struct mpls_route *rt;
477 
478 	ASSERT_RTNL();
479 
480 	platform_label = rtnl_dereference(net->mpls.platform_label);
481 	rt = rtnl_dereference(platform_label[index]);
482 	rcu_assign_pointer(platform_label[index], new);
483 
484 	mpls_notify_route(net, index, rt, new, info);
485 
486 	/* If we removed a route free it now */
487 	mpls_rt_free(rt);
488 }
489 
490 static unsigned find_free_label(struct net *net)
491 {
492 	struct mpls_route __rcu **platform_label;
493 	size_t platform_labels;
494 	unsigned index;
495 
496 	platform_label = rtnl_dereference(net->mpls.platform_label);
497 	platform_labels = net->mpls.platform_labels;
498 	for (index = MPLS_LABEL_FIRST_UNRESERVED; index < platform_labels;
499 	     index++) {
500 		if (!rtnl_dereference(platform_label[index]))
501 			return index;
502 	}
503 	return LABEL_NOT_SPECIFIED;
504 }
505 
506 #if IS_ENABLED(CONFIG_INET)
507 static struct net_device *inet_fib_lookup_dev(struct net *net,
508 					      const void *addr)
509 {
510 	struct net_device *dev;
511 	struct rtable *rt;
512 	struct in_addr daddr;
513 
514 	memcpy(&daddr, addr, sizeof(struct in_addr));
515 	rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
516 	if (IS_ERR(rt))
517 		return ERR_CAST(rt);
518 
519 	dev = rt->dst.dev;
520 	dev_hold(dev);
521 
522 	ip_rt_put(rt);
523 
524 	return dev;
525 }
526 #else
527 static struct net_device *inet_fib_lookup_dev(struct net *net,
528 					      const void *addr)
529 {
530 	return ERR_PTR(-EAFNOSUPPORT);
531 }
532 #endif
533 
534 #if IS_ENABLED(CONFIG_IPV6)
535 static struct net_device *inet6_fib_lookup_dev(struct net *net,
536 					       const void *addr)
537 {
538 	struct net_device *dev;
539 	struct dst_entry *dst;
540 	struct flowi6 fl6;
541 	int err;
542 
543 	if (!ipv6_stub)
544 		return ERR_PTR(-EAFNOSUPPORT);
545 
546 	memset(&fl6, 0, sizeof(fl6));
547 	memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
548 	err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
549 	if (err)
550 		return ERR_PTR(err);
551 
552 	dev = dst->dev;
553 	dev_hold(dev);
554 	dst_release(dst);
555 
556 	return dev;
557 }
558 #else
559 static struct net_device *inet6_fib_lookup_dev(struct net *net,
560 					       const void *addr)
561 {
562 	return ERR_PTR(-EAFNOSUPPORT);
563 }
564 #endif
565 
566 static struct net_device *find_outdev(struct net *net,
567 				      struct mpls_route *rt,
568 				      struct mpls_nh *nh, int oif)
569 {
570 	struct net_device *dev = NULL;
571 
572 	if (!oif) {
573 		switch (nh->nh_via_table) {
574 		case NEIGH_ARP_TABLE:
575 			dev = inet_fib_lookup_dev(net, mpls_nh_via(rt, nh));
576 			break;
577 		case NEIGH_ND_TABLE:
578 			dev = inet6_fib_lookup_dev(net, mpls_nh_via(rt, nh));
579 			break;
580 		case NEIGH_LINK_TABLE:
581 			break;
582 		}
583 	} else {
584 		dev = dev_get_by_index(net, oif);
585 	}
586 
587 	if (!dev)
588 		return ERR_PTR(-ENODEV);
589 
590 	if (IS_ERR(dev))
591 		return dev;
592 
593 	/* The caller is holding rtnl anyway, so release the dev reference */
594 	dev_put(dev);
595 
596 	return dev;
597 }
598 
599 static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
600 			      struct mpls_nh *nh, int oif)
601 {
602 	struct net_device *dev = NULL;
603 	int err = -ENODEV;
604 
605 	dev = find_outdev(net, rt, nh, oif);
606 	if (IS_ERR(dev)) {
607 		err = PTR_ERR(dev);
608 		dev = NULL;
609 		goto errout;
610 	}
611 
612 	/* Ensure this is a supported device */
613 	err = -EINVAL;
614 	if (!mpls_dev_get(dev))
615 		goto errout;
616 
617 	if ((nh->nh_via_table == NEIGH_LINK_TABLE) &&
618 	    (dev->addr_len != nh->nh_via_alen))
619 		goto errout;
620 
621 	RCU_INIT_POINTER(nh->nh_dev, dev);
622 
623 	if (!(dev->flags & IFF_UP)) {
624 		nh->nh_flags |= RTNH_F_DEAD;
625 	} else {
626 		unsigned int flags;
627 
628 		flags = dev_get_flags(dev);
629 		if (!(flags & (IFF_RUNNING | IFF_LOWER_UP)))
630 			nh->nh_flags |= RTNH_F_LINKDOWN;
631 	}
632 
633 	return 0;
634 
635 errout:
636 	return err;
637 }
638 
639 static int mpls_nh_build_from_cfg(struct mpls_route_config *cfg,
640 				  struct mpls_route *rt)
641 {
642 	struct net *net = cfg->rc_nlinfo.nl_net;
643 	struct mpls_nh *nh = rt->rt_nh;
644 	int err;
645 	int i;
646 
647 	if (!nh)
648 		return -ENOMEM;
649 
650 	err = -EINVAL;
651 	/* Ensure only a supported number of labels are present */
652 	if (cfg->rc_output_labels > MAX_NEW_LABELS)
653 		goto errout;
654 
655 	nh->nh_labels = cfg->rc_output_labels;
656 	for (i = 0; i < nh->nh_labels; i++)
657 		nh->nh_label[i] = cfg->rc_output_label[i];
658 
659 	nh->nh_via_table = cfg->rc_via_table;
660 	memcpy(__mpls_nh_via(rt, nh), cfg->rc_via, cfg->rc_via_alen);
661 	nh->nh_via_alen = cfg->rc_via_alen;
662 
663 	err = mpls_nh_assign_dev(net, rt, nh, cfg->rc_ifindex);
664 	if (err)
665 		goto errout;
666 
667 	if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
668 		rt->rt_nhn_alive--;
669 
670 	return 0;
671 
672 errout:
673 	return err;
674 }
675 
676 static int mpls_nh_build(struct net *net, struct mpls_route *rt,
677 			 struct mpls_nh *nh, int oif, struct nlattr *via,
678 			 struct nlattr *newdst)
679 {
680 	int err = -ENOMEM;
681 
682 	if (!nh)
683 		goto errout;
684 
685 	if (newdst) {
686 		err = nla_get_labels(newdst, MAX_NEW_LABELS,
687 				     &nh->nh_labels, nh->nh_label);
688 		if (err)
689 			goto errout;
690 	}
691 
692 	if (via) {
693 		err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table,
694 				  __mpls_nh_via(rt, nh));
695 		if (err)
696 			goto errout;
697 	} else {
698 		nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC;
699 	}
700 
701 	err = mpls_nh_assign_dev(net, rt, nh, oif);
702 	if (err)
703 		goto errout;
704 
705 	return 0;
706 
707 errout:
708 	return err;
709 }
710 
711 static int mpls_count_nexthops(struct rtnexthop *rtnh, int len,
712 			       u8 cfg_via_alen, u8 *max_via_alen)
713 {
714 	int nhs = 0;
715 	int remaining = len;
716 
717 	if (!rtnh) {
718 		*max_via_alen = cfg_via_alen;
719 		return 1;
720 	}
721 
722 	*max_via_alen = 0;
723 
724 	while (rtnh_ok(rtnh, remaining)) {
725 		struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
726 		int attrlen;
727 
728 		attrlen = rtnh_attrlen(rtnh);
729 		nla = nla_find(attrs, attrlen, RTA_VIA);
730 		if (nla && nla_len(nla) >=
731 		    offsetof(struct rtvia, rtvia_addr)) {
732 			int via_alen = nla_len(nla) -
733 				offsetof(struct rtvia, rtvia_addr);
734 
735 			if (via_alen <= MAX_VIA_ALEN)
736 				*max_via_alen = max_t(u16, *max_via_alen,
737 						      via_alen);
738 		}
739 
740 		nhs++;
741 		rtnh = rtnh_next(rtnh, &remaining);
742 	}
743 
744 	/* leftover implies invalid nexthop configuration, discard it */
745 	return remaining > 0 ? 0 : nhs;
746 }
747 
748 static int mpls_nh_build_multi(struct mpls_route_config *cfg,
749 			       struct mpls_route *rt)
750 {
751 	struct rtnexthop *rtnh = cfg->rc_mp;
752 	struct nlattr *nla_via, *nla_newdst;
753 	int remaining = cfg->rc_mp_len;
754 	int nhs = 0;
755 	int err = 0;
756 
757 	change_nexthops(rt) {
758 		int attrlen;
759 
760 		nla_via = NULL;
761 		nla_newdst = NULL;
762 
763 		err = -EINVAL;
764 		if (!rtnh_ok(rtnh, remaining))
765 			goto errout;
766 
767 		/* neither weighted multipath nor any flags
768 		 * are supported
769 		 */
770 		if (rtnh->rtnh_hops || rtnh->rtnh_flags)
771 			goto errout;
772 
773 		attrlen = rtnh_attrlen(rtnh);
774 		if (attrlen > 0) {
775 			struct nlattr *attrs = rtnh_attrs(rtnh);
776 
777 			nla_via = nla_find(attrs, attrlen, RTA_VIA);
778 			nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST);
779 		}
780 
781 		err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh,
782 				    rtnh->rtnh_ifindex, nla_via, nla_newdst);
783 		if (err)
784 			goto errout;
785 
786 		if (nh->nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
787 			rt->rt_nhn_alive--;
788 
789 		rtnh = rtnh_next(rtnh, &remaining);
790 		nhs++;
791 	} endfor_nexthops(rt);
792 
793 	rt->rt_nhn = nhs;
794 
795 	return 0;
796 
797 errout:
798 	return err;
799 }
800 
801 static int mpls_route_add(struct mpls_route_config *cfg)
802 {
803 	struct mpls_route __rcu **platform_label;
804 	struct net *net = cfg->rc_nlinfo.nl_net;
805 	struct mpls_route *rt, *old;
806 	int err = -EINVAL;
807 	u8 max_via_alen;
808 	unsigned index;
809 	int nhs;
810 
811 	index = cfg->rc_label;
812 
813 	/* If a label was not specified during insert, pick one */
814 	if ((index == LABEL_NOT_SPECIFIED) &&
815 	    (cfg->rc_nlflags & NLM_F_CREATE)) {
816 		index = find_free_label(net);
817 	}
818 
819 	/* Reserved labels may not be set */
820 	if (index < MPLS_LABEL_FIRST_UNRESERVED)
821 		goto errout;
822 
823 	/* The full 20 bit range may not be supported. */
824 	if (index >= net->mpls.platform_labels)
825 		goto errout;
826 
827 	/* Append makes no sense with mpls */
828 	err = -EOPNOTSUPP;
829 	if (cfg->rc_nlflags & NLM_F_APPEND)
830 		goto errout;
831 
832 	err = -EEXIST;
833 	platform_label = rtnl_dereference(net->mpls.platform_label);
834 	old = rtnl_dereference(platform_label[index]);
835 	if ((cfg->rc_nlflags & NLM_F_EXCL) && old)
836 		goto errout;
837 
838 	err = -EEXIST;
839 	if (!(cfg->rc_nlflags & NLM_F_REPLACE) && old)
840 		goto errout;
841 
842 	err = -ENOENT;
843 	if (!(cfg->rc_nlflags & NLM_F_CREATE) && !old)
844 		goto errout;
845 
846 	err = -EINVAL;
847 	nhs = mpls_count_nexthops(cfg->rc_mp, cfg->rc_mp_len,
848 				  cfg->rc_via_alen, &max_via_alen);
849 	if (nhs == 0)
850 		goto errout;
851 
852 	err = -ENOMEM;
853 	rt = mpls_rt_alloc(nhs, max_via_alen);
854 	if (!rt)
855 		goto errout;
856 
857 	rt->rt_protocol = cfg->rc_protocol;
858 	rt->rt_payload_type = cfg->rc_payload_type;
859 
860 	if (cfg->rc_mp)
861 		err = mpls_nh_build_multi(cfg, rt);
862 	else
863 		err = mpls_nh_build_from_cfg(cfg, rt);
864 	if (err)
865 		goto freert;
866 
867 	mpls_route_update(net, index, rt, &cfg->rc_nlinfo);
868 
869 	return 0;
870 
871 freert:
872 	mpls_rt_free(rt);
873 errout:
874 	return err;
875 }
876 
877 static int mpls_route_del(struct mpls_route_config *cfg)
878 {
879 	struct net *net = cfg->rc_nlinfo.nl_net;
880 	unsigned index;
881 	int err = -EINVAL;
882 
883 	index = cfg->rc_label;
884 
885 	/* Reserved labels may not be removed */
886 	if (index < MPLS_LABEL_FIRST_UNRESERVED)
887 		goto errout;
888 
889 	/* The full 20 bit range may not be supported */
890 	if (index >= net->mpls.platform_labels)
891 		goto errout;
892 
893 	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
894 
895 	err = 0;
896 errout:
897 	return err;
898 }
899 
900 static void mpls_get_stats(struct mpls_dev *mdev,
901 			   struct mpls_link_stats *stats)
902 {
903 	struct mpls_pcpu_stats *p;
904 	int i;
905 
906 	memset(stats, 0, sizeof(*stats));
907 
908 	for_each_possible_cpu(i) {
909 		struct mpls_link_stats local;
910 		unsigned int start;
911 
912 		p = per_cpu_ptr(mdev->stats, i);
913 		do {
914 			start = u64_stats_fetch_begin(&p->syncp);
915 			local = p->stats;
916 		} while (u64_stats_fetch_retry(&p->syncp, start));
917 
918 		stats->rx_packets	+= local.rx_packets;
919 		stats->rx_bytes		+= local.rx_bytes;
920 		stats->tx_packets	+= local.tx_packets;
921 		stats->tx_bytes		+= local.tx_bytes;
922 		stats->rx_errors	+= local.rx_errors;
923 		stats->tx_errors	+= local.tx_errors;
924 		stats->rx_dropped	+= local.rx_dropped;
925 		stats->tx_dropped	+= local.tx_dropped;
926 		stats->rx_noroute	+= local.rx_noroute;
927 	}
928 }
929 
930 static int mpls_fill_stats_af(struct sk_buff *skb,
931 			      const struct net_device *dev)
932 {
933 	struct mpls_link_stats *stats;
934 	struct mpls_dev *mdev;
935 	struct nlattr *nla;
936 
937 	mdev = mpls_dev_get(dev);
938 	if (!mdev)
939 		return -ENODATA;
940 
941 	nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
942 				sizeof(struct mpls_link_stats),
943 				MPLS_STATS_UNSPEC);
944 	if (!nla)
945 		return -EMSGSIZE;
946 
947 	stats = nla_data(nla);
948 	mpls_get_stats(mdev, stats);
949 
950 	return 0;
951 }
952 
953 static size_t mpls_get_stats_af_size(const struct net_device *dev)
954 {
955 	struct mpls_dev *mdev;
956 
957 	mdev = mpls_dev_get(dev);
958 	if (!mdev)
959 		return 0;
960 
961 	return nla_total_size_64bit(sizeof(struct mpls_link_stats));
962 }
963 
964 static int mpls_netconf_fill_devconf(struct sk_buff *skb, struct mpls_dev *mdev,
965 				     u32 portid, u32 seq, int event,
966 				     unsigned int flags, int type)
967 {
968 	struct nlmsghdr  *nlh;
969 	struct netconfmsg *ncm;
970 	bool all = false;
971 
972 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
973 			flags);
974 	if (!nlh)
975 		return -EMSGSIZE;
976 
977 	if (type == NETCONFA_ALL)
978 		all = true;
979 
980 	ncm = nlmsg_data(nlh);
981 	ncm->ncm_family = AF_MPLS;
982 
983 	if (nla_put_s32(skb, NETCONFA_IFINDEX, mdev->dev->ifindex) < 0)
984 		goto nla_put_failure;
985 
986 	if ((all || type == NETCONFA_INPUT) &&
987 	    nla_put_s32(skb, NETCONFA_INPUT,
988 			mdev->input_enabled) < 0)
989 		goto nla_put_failure;
990 
991 	nlmsg_end(skb, nlh);
992 	return 0;
993 
994 nla_put_failure:
995 	nlmsg_cancel(skb, nlh);
996 	return -EMSGSIZE;
997 }
998 
999 static int mpls_netconf_msgsize_devconf(int type)
1000 {
1001 	int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1002 			+ nla_total_size(4); /* NETCONFA_IFINDEX */
1003 	bool all = false;
1004 
1005 	if (type == NETCONFA_ALL)
1006 		all = true;
1007 
1008 	if (all || type == NETCONFA_INPUT)
1009 		size += nla_total_size(4);
1010 
1011 	return size;
1012 }
1013 
1014 static void mpls_netconf_notify_devconf(struct net *net, int type,
1015 					struct mpls_dev *mdev)
1016 {
1017 	struct sk_buff *skb;
1018 	int err = -ENOBUFS;
1019 
1020 	skb = nlmsg_new(mpls_netconf_msgsize_devconf(type), GFP_KERNEL);
1021 	if (!skb)
1022 		goto errout;
1023 
1024 	err = mpls_netconf_fill_devconf(skb, mdev, 0, 0, RTM_NEWNETCONF,
1025 					0, type);
1026 	if (err < 0) {
1027 		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
1028 		WARN_ON(err == -EMSGSIZE);
1029 		kfree_skb(skb);
1030 		goto errout;
1031 	}
1032 
1033 	rtnl_notify(skb, net, 0, RTNLGRP_MPLS_NETCONF, NULL, GFP_KERNEL);
1034 	return;
1035 errout:
1036 	if (err < 0)
1037 		rtnl_set_sk_err(net, RTNLGRP_MPLS_NETCONF, err);
1038 }
1039 
1040 static const struct nla_policy devconf_mpls_policy[NETCONFA_MAX + 1] = {
1041 	[NETCONFA_IFINDEX]	= { .len = sizeof(int) },
1042 };
1043 
1044 static int mpls_netconf_get_devconf(struct sk_buff *in_skb,
1045 				    struct nlmsghdr *nlh)
1046 {
1047 	struct net *net = sock_net(in_skb->sk);
1048 	struct nlattr *tb[NETCONFA_MAX + 1];
1049 	struct netconfmsg *ncm;
1050 	struct net_device *dev;
1051 	struct mpls_dev *mdev;
1052 	struct sk_buff *skb;
1053 	int ifindex;
1054 	int err;
1055 
1056 	err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1057 			  devconf_mpls_policy);
1058 	if (err < 0)
1059 		goto errout;
1060 
1061 	err = -EINVAL;
1062 	if (!tb[NETCONFA_IFINDEX])
1063 		goto errout;
1064 
1065 	ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1066 	dev = __dev_get_by_index(net, ifindex);
1067 	if (!dev)
1068 		goto errout;
1069 
1070 	mdev = mpls_dev_get(dev);
1071 	if (!mdev)
1072 		goto errout;
1073 
1074 	err = -ENOBUFS;
1075 	skb = nlmsg_new(mpls_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
1076 	if (!skb)
1077 		goto errout;
1078 
1079 	err = mpls_netconf_fill_devconf(skb, mdev,
1080 					NETLINK_CB(in_skb).portid,
1081 					nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1082 					NETCONFA_ALL);
1083 	if (err < 0) {
1084 		/* -EMSGSIZE implies BUG in mpls_netconf_msgsize_devconf() */
1085 		WARN_ON(err == -EMSGSIZE);
1086 		kfree_skb(skb);
1087 		goto errout;
1088 	}
1089 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
1090 errout:
1091 	return err;
1092 }
1093 
1094 static int mpls_netconf_dump_devconf(struct sk_buff *skb,
1095 				     struct netlink_callback *cb)
1096 {
1097 	struct net *net = sock_net(skb->sk);
1098 	struct hlist_head *head;
1099 	struct net_device *dev;
1100 	struct mpls_dev *mdev;
1101 	int idx, s_idx;
1102 	int h, s_h;
1103 
1104 	s_h = cb->args[0];
1105 	s_idx = idx = cb->args[1];
1106 
1107 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1108 		idx = 0;
1109 		head = &net->dev_index_head[h];
1110 		rcu_read_lock();
1111 		cb->seq = net->dev_base_seq;
1112 		hlist_for_each_entry_rcu(dev, head, index_hlist) {
1113 			if (idx < s_idx)
1114 				goto cont;
1115 			mdev = mpls_dev_get(dev);
1116 			if (!mdev)
1117 				goto cont;
1118 			if (mpls_netconf_fill_devconf(skb, mdev,
1119 						      NETLINK_CB(cb->skb).portid,
1120 						      cb->nlh->nlmsg_seq,
1121 						      RTM_NEWNETCONF,
1122 						      NLM_F_MULTI,
1123 						      NETCONFA_ALL) < 0) {
1124 				rcu_read_unlock();
1125 				goto done;
1126 			}
1127 			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1128 cont:
1129 			idx++;
1130 		}
1131 		rcu_read_unlock();
1132 	}
1133 done:
1134 	cb->args[0] = h;
1135 	cb->args[1] = idx;
1136 
1137 	return skb->len;
1138 }
1139 
1140 #define MPLS_PERDEV_SYSCTL_OFFSET(field)	\
1141 	(&((struct mpls_dev *)0)->field)
1142 
1143 static int mpls_conf_proc(struct ctl_table *ctl, int write,
1144 			  void __user *buffer,
1145 			  size_t *lenp, loff_t *ppos)
1146 {
1147 	int oval = *(int *)ctl->data;
1148 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1149 
1150 	if (write) {
1151 		struct mpls_dev *mdev = ctl->extra1;
1152 		int i = (int *)ctl->data - (int *)mdev;
1153 		struct net *net = ctl->extra2;
1154 		int val = *(int *)ctl->data;
1155 
1156 		if (i == offsetof(struct mpls_dev, input_enabled) &&
1157 		    val != oval) {
1158 			mpls_netconf_notify_devconf(net,
1159 						    NETCONFA_INPUT,
1160 						    mdev);
1161 		}
1162 	}
1163 
1164 	return ret;
1165 }
1166 
1167 static const struct ctl_table mpls_dev_table[] = {
1168 	{
1169 		.procname	= "input",
1170 		.maxlen		= sizeof(int),
1171 		.mode		= 0644,
1172 		.proc_handler	= mpls_conf_proc,
1173 		.data		= MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
1174 	},
1175 	{ }
1176 };
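/* With the path built in mpls_dev_sysctl_register() below, this table is
 * exposed as /proc/sys/net/mpls/conf/<device>/input; changing the value
 * triggers an RTM_NEWNETCONF notification from mpls_conf_proc().
 */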
1177 
1178 static int mpls_dev_sysctl_register(struct net_device *dev,
1179 				    struct mpls_dev *mdev)
1180 {
1181 	char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
1182 	struct net *net = dev_net(dev);
1183 	struct ctl_table *table;
1184 	int i;
1185 
1186 	table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
1187 	if (!table)
1188 		goto out;
1189 
1190 	/* Table data contains only offsets relative to the base of
1191 	 * the mdev at this point, so make them absolute.
1192 	 */
1193 	for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) {
1194 		table[i].data = (char *)mdev + (uintptr_t)table[i].data;
1195 		table[i].extra1 = mdev;
1196 		table[i].extra2 = net;
1197 	}
1198 
1199 	snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
1200 
1201 	mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
1202 	if (!mdev->sysctl)
1203 		goto free;
1204 
1205 	return 0;
1206 
1207 free:
1208 	kfree(table);
1209 out:
1210 	return -ENOBUFS;
1211 }
1212 
1213 static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
1214 {
1215 	struct ctl_table *table;
1216 
1217 	table = mdev->sysctl->ctl_table_arg;
1218 	unregister_net_sysctl_table(mdev->sysctl);
1219 	kfree(table);
1220 }
1221 
1222 static struct mpls_dev *mpls_add_dev(struct net_device *dev)
1223 {
1224 	struct mpls_dev *mdev;
1225 	int err = -ENOMEM;
1226 	int i;
1227 
1228 	ASSERT_RTNL();
1229 
1230 	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
1231 	if (!mdev)
1232 		return ERR_PTR(err);
1233 
1234 	mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
1235 	if (!mdev->stats)
1236 		goto free;
1237 
1238 	for_each_possible_cpu(i) {
1239 		struct mpls_pcpu_stats *mpls_stats;
1240 
1241 		mpls_stats = per_cpu_ptr(mdev->stats, i);
1242 		u64_stats_init(&mpls_stats->syncp);
1243 	}
1244 
1245 	err = mpls_dev_sysctl_register(dev, mdev);
1246 	if (err)
1247 		goto free;
1248 
1249 	mdev->dev = dev;
1250 	rcu_assign_pointer(dev->mpls_ptr, mdev);
1251 
1252 	return mdev;
1253 
1254 free:
1255 	free_percpu(mdev->stats);
1256 	kfree(mdev);
1257 	return ERR_PTR(err);
1258 }
1259 
1260 static void mpls_dev_destroy_rcu(struct rcu_head *head)
1261 {
1262 	struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
1263 
1264 	free_percpu(mdev->stats);
1265 	kfree(mdev);
1266 }
1267 
1268 static void mpls_ifdown(struct net_device *dev, int event)
1269 {
1270 	struct mpls_route __rcu **platform_label;
1271 	struct net *net = dev_net(dev);
1272 	unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN;
1273 	unsigned int alive;
1274 	unsigned index;
1275 
1276 	platform_label = rtnl_dereference(net->mpls.platform_label);
1277 	for (index = 0; index < net->mpls.platform_labels; index++) {
1278 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
1279 
1280 		if (!rt)
1281 			continue;
1282 
1283 		alive = 0;
1284 		change_nexthops(rt) {
1285 			if (rtnl_dereference(nh->nh_dev) != dev)
1286 				goto next;
1287 
1288 			switch (event) {
1289 			case NETDEV_DOWN:
1290 			case NETDEV_UNREGISTER:
1291 				nh->nh_flags |= RTNH_F_DEAD;
1292 				/* fall through */
1293 			case NETDEV_CHANGE:
1294 				nh->nh_flags |= RTNH_F_LINKDOWN;
1295 				break;
1296 			}
1297 			if (event == NETDEV_UNREGISTER)
1298 				RCU_INIT_POINTER(nh->nh_dev, NULL);
1299 next:
1300 			if (!(nh->nh_flags & nh_flags))
1301 				alive++;
1302 		} endfor_nexthops(rt);
1303 
1304 		WRITE_ONCE(rt->rt_nhn_alive, alive);
1305 	}
1306 }
1307 
1308 static void mpls_ifup(struct net_device *dev, unsigned int nh_flags)
1309 {
1310 	struct mpls_route __rcu **platform_label;
1311 	struct net *net = dev_net(dev);
1312 	unsigned index;
1313 	int alive;
1314 
1315 	platform_label = rtnl_dereference(net->mpls.platform_label);
1316 	for (index = 0; index < net->mpls.platform_labels; index++) {
1317 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
1318 
1319 		if (!rt)
1320 			continue;
1321 
1322 		alive = 0;
1323 		change_nexthops(rt) {
1324 			struct net_device *nh_dev =
1325 				rtnl_dereference(nh->nh_dev);
1326 
1327 			if (!(nh->nh_flags & nh_flags)) {
1328 				alive++;
1329 				continue;
1330 			}
1331 			if (nh_dev != dev)
1332 				continue;
1333 			alive++;
1334 			nh->nh_flags &= ~nh_flags;
1335 		} endfor_nexthops(rt);
1336 
1337 		WRITE_ONCE(rt->rt_nhn_alive, alive);
1338 	}
1339 }
1340 
1341 static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
1342 			   void *ptr)
1343 {
1344 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1345 	struct mpls_dev *mdev;
1346 	unsigned int flags;
1347 
1348 	if (event == NETDEV_REGISTER) {
1349 		/* For now just support Ethernet, loopback, IPGRE, SIT and IPIP devices */
1350 		if (dev->type == ARPHRD_ETHER ||
1351 		    dev->type == ARPHRD_LOOPBACK ||
1352 		    dev->type == ARPHRD_IPGRE ||
1353 		    dev->type == ARPHRD_SIT ||
1354 		    dev->type == ARPHRD_TUNNEL) {
1355 			mdev = mpls_add_dev(dev);
1356 			if (IS_ERR(mdev))
1357 				return notifier_from_errno(PTR_ERR(mdev));
1358 		}
1359 		return NOTIFY_OK;
1360 	}
1361 
1362 	mdev = mpls_dev_get(dev);
1363 	if (!mdev)
1364 		return NOTIFY_OK;
1365 
1366 	switch (event) {
1367 	case NETDEV_DOWN:
1368 		mpls_ifdown(dev, event);
1369 		break;
1370 	case NETDEV_UP:
1371 		flags = dev_get_flags(dev);
1372 		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1373 			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1374 		else
1375 			mpls_ifup(dev, RTNH_F_DEAD);
1376 		break;
1377 	case NETDEV_CHANGE:
1378 		flags = dev_get_flags(dev);
1379 		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
1380 			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
1381 		else
1382 			mpls_ifdown(dev, event);
1383 		break;
1384 	case NETDEV_UNREGISTER:
1385 		mpls_ifdown(dev, event);
1386 		mdev = mpls_dev_get(dev);
1387 		if (mdev) {
1388 			mpls_dev_sysctl_unregister(mdev);
1389 			RCU_INIT_POINTER(dev->mpls_ptr, NULL);
1390 			call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
1391 		}
1392 		break;
1393 	case NETDEV_CHANGENAME:
1394 		mdev = mpls_dev_get(dev);
1395 		if (mdev) {
1396 			int err;
1397 
1398 			mpls_dev_sysctl_unregister(mdev);
1399 			err = mpls_dev_sysctl_register(dev, mdev);
1400 			if (err)
1401 				return notifier_from_errno(err);
1402 		}
1403 		break;
1404 	}
1405 	return NOTIFY_OK;
1406 }
1407 
1408 static struct notifier_block mpls_dev_notifier = {
1409 	.notifier_call = mpls_dev_notify,
1410 };
1411 
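/* RTA_VIA carries a struct rtvia: a 2-byte address family followed by the
 * raw next-hop address bytes (4 for AF_INET, 16 for AF_INET6, the device
 * address length for AF_PACKET).
 */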
1412 static int nla_put_via(struct sk_buff *skb,
1413 		       u8 table, const void *addr, int alen)
1414 {
1415 	static const int table_to_family[NEIGH_NR_TABLES + 1] = {
1416 		AF_INET, AF_INET6, AF_DECnet, AF_PACKET,
1417 	};
1418 	struct nlattr *nla;
1419 	struct rtvia *via;
1420 	int family = AF_UNSPEC;
1421 
1422 	nla = nla_reserve(skb, RTA_VIA, alen + 2);
1423 	if (!nla)
1424 		return -EMSGSIZE;
1425 
1426 	if (table <= NEIGH_NR_TABLES)
1427 		family = table_to_family[table];
1428 
1429 	via = nla_data(nla);
1430 	via->rtvia_family = family;
1431 	memcpy(via->rtvia_addr, addr, alen);
1432 	return 0;
1433 }
1434 
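/* RTA_DST and RTA_NEWDST carry labels in shim-header format: each label
 * is one 32-bit entry with TC and TTL zero and the bottom-of-stack bit
 * set only on the last entry; nla_get_labels() below enforces exactly
 * that encoding.
 */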
1435 int nla_put_labels(struct sk_buff *skb, int attrtype,
1436 		   u8 labels, const u32 label[])
1437 {
1438 	struct nlattr *nla;
1439 	struct mpls_shim_hdr *nla_label;
1440 	bool bos;
1441 	int i;
1442 	nla = nla_reserve(skb, attrtype, labels*4);
1443 	if (!nla)
1444 		return -EMSGSIZE;
1445 
1446 	nla_label = nla_data(nla);
1447 	bos = true;
1448 	for (i = labels - 1; i >= 0; i--) {
1449 		nla_label[i] = mpls_entry_encode(label[i], 0, 0, bos);
1450 		bos = false;
1451 	}
1452 
1453 	return 0;
1454 }
1455 EXPORT_SYMBOL_GPL(nla_put_labels);
1456 
1457 int nla_get_labels(const struct nlattr *nla,
1458 		   u32 max_labels, u8 *labels, u32 label[])
1459 {
1460 	unsigned len = nla_len(nla);
1461 	unsigned nla_labels;
1462 	struct mpls_shim_hdr *nla_label;
1463 	bool bos;
1464 	int i;
1465 
1466 	/* len needs to be an exact multiple of 4 (the label size) */
1467 	if (len & 3)
1468 		return -EINVAL;
1469 
1470 	/* Limit the number of new labels allowed */
1471 	nla_labels = len/4;
1472 	if (nla_labels > max_labels)
1473 		return -EINVAL;
1474 
1475 	nla_label = nla_data(nla);
1476 	bos = true;
1477 	for (i = nla_labels - 1; i >= 0; i--, bos = false) {
1478 		struct mpls_entry_decoded dec;
1479 		dec = mpls_entry_decode(nla_label + i);
1480 
1481 		/* Ensure the bottom of stack flag is properly set
1482 		 * and ttl and tc are both clear.
1483 		 */
1484 		if ((dec.bos != bos) || dec.ttl || dec.tc)
1485 			return -EINVAL;
1486 
1487 		switch (dec.label) {
1488 		case MPLS_LABEL_IMPLNULL:
1489 			/* RFC3032: This is a label that an LSR may
1490 			 * assign and distribute, but which never
1491 			 * actually appears in the encapsulation.
1492 			 */
1493 			return -EINVAL;
1494 		}
1495 
1496 		label[i] = dec.label;
1497 	}
1498 	*labels = nla_labels;
1499 	return 0;
1500 }
1501 EXPORT_SYMBOL_GPL(nla_get_labels);
1502 
1503 int nla_get_via(const struct nlattr *nla, u8 *via_alen,
1504 		u8 *via_table, u8 via_addr[])
1505 {
1506 	struct rtvia *via = nla_data(nla);
1507 	int err = -EINVAL;
1508 	int alen;
1509 
1510 	if (nla_len(nla) < offsetof(struct rtvia, rtvia_addr))
1511 		goto errout;
1512 	alen = nla_len(nla) -
1513 			offsetof(struct rtvia, rtvia_addr);
1514 	if (alen > MAX_VIA_ALEN)
1515 		goto errout;
1516 
1517 	/* Validate the address family */
1518 	switch (via->rtvia_family) {
1519 	case AF_PACKET:
1520 		*via_table = NEIGH_LINK_TABLE;
1521 		break;
1522 	case AF_INET:
1523 		*via_table = NEIGH_ARP_TABLE;
1524 		if (alen != 4)
1525 			goto errout;
1526 		break;
1527 	case AF_INET6:
1528 		*via_table = NEIGH_ND_TABLE;
1529 		if (alen != 16)
1530 			goto errout;
1531 		break;
1532 	default:
1533 		/* Unsupported address family */
1534 		goto errout;
1535 	}
1536 
1537 	memcpy(via_addr, via->rtvia_addr, alen);
1538 	*via_alen = alen;
1539 	err = 0;
1540 
1541 errout:
1542 	return err;
1543 }
1544 
1545 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
1546 			       struct mpls_route_config *cfg)
1547 {
1548 	struct rtmsg *rtm;
1549 	struct nlattr *tb[RTA_MAX+1];
1550 	int index;
1551 	int err;
1552 
1553 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_mpls_policy);
1554 	if (err < 0)
1555 		goto errout;
1556 
1557 	err = -EINVAL;
1558 	rtm = nlmsg_data(nlh);
1559 	memset(cfg, 0, sizeof(*cfg));
1560 
1561 	if (rtm->rtm_family != AF_MPLS)
1562 		goto errout;
1563 	if (rtm->rtm_dst_len != 20)
1564 		goto errout;
1565 	if (rtm->rtm_src_len != 0)
1566 		goto errout;
1567 	if (rtm->rtm_tos != 0)
1568 		goto errout;
1569 	if (rtm->rtm_table != RT_TABLE_MAIN)
1570 		goto errout;
1571 	/* Any value is acceptable for rtm_protocol */
1572 
1573 	/* As mpls uses destination specific addresses
1574 	 * (or source specific addresses in the case of multicast),
1575 	 * all addresses have universal scope.
1576 	 */
1577 	if (rtm->rtm_scope != RT_SCOPE_UNIVERSE)
1578 		goto errout;
1579 	if (rtm->rtm_type != RTN_UNICAST)
1580 		goto errout;
1581 	if (rtm->rtm_flags != 0)
1582 		goto errout;
1583 
1584 	cfg->rc_label		= LABEL_NOT_SPECIFIED;
1585 	cfg->rc_protocol	= rtm->rtm_protocol;
1586 	cfg->rc_via_table	= MPLS_NEIGH_TABLE_UNSPEC;
1587 	cfg->rc_nlflags		= nlh->nlmsg_flags;
1588 	cfg->rc_nlinfo.portid	= NETLINK_CB(skb).portid;
1589 	cfg->rc_nlinfo.nlh	= nlh;
1590 	cfg->rc_nlinfo.nl_net	= sock_net(skb->sk);
1591 
1592 	for (index = 0; index <= RTA_MAX; index++) {
1593 		struct nlattr *nla = tb[index];
1594 		if (!nla)
1595 			continue;
1596 
1597 		switch (index) {
1598 		case RTA_OIF:
1599 			cfg->rc_ifindex = nla_get_u32(nla);
1600 			break;
1601 		case RTA_NEWDST:
1602 			if (nla_get_labels(nla, MAX_NEW_LABELS,
1603 					   &cfg->rc_output_labels,
1604 					   cfg->rc_output_label))
1605 				goto errout;
1606 			break;
1607 		case RTA_DST:
1608 		{
1609 			u8 label_count;
1610 			if (nla_get_labels(nla, 1, &label_count,
1611 					   &cfg->rc_label))
1612 				goto errout;
1613 
1614 			/* Reserved labels may not be set */
1615 			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
1616 				goto errout;
1617 
1618 			break;
1619 		}
1620 		case RTA_VIA:
1621 		{
1622 			if (nla_get_via(nla, &cfg->rc_via_alen,
1623 					&cfg->rc_via_table, cfg->rc_via))
1624 				goto errout;
1625 			break;
1626 		}
1627 		case RTA_MULTIPATH:
1628 		{
1629 			cfg->rc_mp = nla_data(nla);
1630 			cfg->rc_mp_len = nla_len(nla);
1631 			break;
1632 		}
1633 		default:
1634 			/* Unsupported attribute */
1635 			goto errout;
1636 		}
1637 	}
1638 
1639 	err = 0;
1640 errout:
1641 	return err;
1642 }
1643 
1644 static int mpls_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1645 {
1646 	struct mpls_route_config cfg;
1647 	int err;
1648 
1649 	err = rtm_to_route_config(skb, nlh, &cfg);
1650 	if (err < 0)
1651 		return err;
1652 
1653 	return mpls_route_del(&cfg);
1654 }
1655 
1656 
1657 static int mpls_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
1658 {
1659 	struct mpls_route_config cfg;
1660 	int err;
1661 
1662 	err = rtm_to_route_config(skb, nlh, &cfg);
1663 	if (err < 0)
1664 		return err;
1665 
1666 	return mpls_route_add(&cfg);
1667 }
1668 
1669 static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
1670 			   u32 label, struct mpls_route *rt, int flags)
1671 {
1672 	struct net_device *dev;
1673 	struct nlmsghdr *nlh;
1674 	struct rtmsg *rtm;
1675 
1676 	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1677 	if (nlh == NULL)
1678 		return -EMSGSIZE;
1679 
1680 	rtm = nlmsg_data(nlh);
1681 	rtm->rtm_family = AF_MPLS;
1682 	rtm->rtm_dst_len = 20;
1683 	rtm->rtm_src_len = 0;
1684 	rtm->rtm_tos = 0;
1685 	rtm->rtm_table = RT_TABLE_MAIN;
1686 	rtm->rtm_protocol = rt->rt_protocol;
1687 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
1688 	rtm->rtm_type = RTN_UNICAST;
1689 	rtm->rtm_flags = 0;
1690 
1691 	if (nla_put_labels(skb, RTA_DST, 1, &label))
1692 		goto nla_put_failure;
1693 	if (rt->rt_nhn == 1) {
1694 		const struct mpls_nh *nh = rt->rt_nh;
1695 
1696 		if (nh->nh_labels &&
1697 		    nla_put_labels(skb, RTA_NEWDST, nh->nh_labels,
1698 				   nh->nh_label))
1699 			goto nla_put_failure;
1700 		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1701 		    nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
1702 				nh->nh_via_alen))
1703 			goto nla_put_failure;
1704 		dev = rtnl_dereference(nh->nh_dev);
1705 		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
1706 			goto nla_put_failure;
1707 		if (nh->nh_flags & RTNH_F_LINKDOWN)
1708 			rtm->rtm_flags |= RTNH_F_LINKDOWN;
1709 		if (nh->nh_flags & RTNH_F_DEAD)
1710 			rtm->rtm_flags |= RTNH_F_DEAD;
1711 	} else {
1712 		struct rtnexthop *rtnh;
1713 		struct nlattr *mp;
1714 		int dead = 0;
1715 		int linkdown = 0;
1716 
1717 		mp = nla_nest_start(skb, RTA_MULTIPATH);
1718 		if (!mp)
1719 			goto nla_put_failure;
1720 
1721 		for_nexthops(rt) {
1722 			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1723 			if (!rtnh)
1724 				goto nla_put_failure;
1725 
1726 			dev = rtnl_dereference(nh->nh_dev);
1727 			if (dev)
1728 				rtnh->rtnh_ifindex = dev->ifindex;
1729 			if (nh->nh_flags & RTNH_F_LINKDOWN) {
1730 				rtnh->rtnh_flags |= RTNH_F_LINKDOWN;
1731 				linkdown++;
1732 			}
1733 			if (nh->nh_flags & RTNH_F_DEAD) {
1734 				rtnh->rtnh_flags |= RTNH_F_DEAD;
1735 				dead++;
1736 			}
1737 
1738 			if (nh->nh_labels && nla_put_labels(skb, RTA_NEWDST,
1739 							    nh->nh_labels,
1740 							    nh->nh_label))
1741 				goto nla_put_failure;
1742 			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC &&
1743 			    nla_put_via(skb, nh->nh_via_table,
1744 					mpls_nh_via(rt, nh),
1745 					nh->nh_via_alen))
1746 				goto nla_put_failure;
1747 
1748 			/* length of rtnetlink header + attributes */
1749 			rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
1750 		} endfor_nexthops(rt);
1751 
1752 		if (linkdown == rt->rt_nhn)
1753 			rtm->rtm_flags |= RTNH_F_LINKDOWN;
1754 		if (dead == rt->rt_nhn)
1755 			rtm->rtm_flags |= RTNH_F_DEAD;
1756 
1757 		nla_nest_end(skb, mp);
1758 	}
1759 
1760 	nlmsg_end(skb, nlh);
1761 	return 0;
1762 
1763 nla_put_failure:
1764 	nlmsg_cancel(skb, nlh);
1765 	return -EMSGSIZE;
1766 }
1767 
1768 static int mpls_dump_routes(struct sk_buff *skb, struct netlink_callback *cb)
1769 {
1770 	struct net *net = sock_net(skb->sk);
1771 	struct mpls_route __rcu **platform_label;
1772 	size_t platform_labels;
1773 	unsigned int index;
1774 
1775 	ASSERT_RTNL();
1776 
1777 	index = cb->args[0];
1778 	if (index < MPLS_LABEL_FIRST_UNRESERVED)
1779 		index = MPLS_LABEL_FIRST_UNRESERVED;
1780 
1781 	platform_label = rtnl_dereference(net->mpls.platform_label);
1782 	platform_labels = net->mpls.platform_labels;
1783 	for (; index < platform_labels; index++) {
1784 		struct mpls_route *rt;
1785 		rt = rtnl_dereference(platform_label[index]);
1786 		if (!rt)
1787 			continue;
1788 
1789 		if (mpls_dump_route(skb, NETLINK_CB(cb->skb).portid,
1790 				    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1791 				    index, rt, NLM_F_MULTI) < 0)
1792 			break;
1793 	}
1794 	cb->args[0] = index;
1795 
1796 	return skb->len;
1797 }
1798 
1799 static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
1800 {
1801 	size_t payload =
1802 		NLMSG_ALIGN(sizeof(struct rtmsg))
1803 		+ nla_total_size(4);			/* RTA_DST */
1804 
1805 	if (rt->rt_nhn == 1) {
1806 		struct mpls_nh *nh = rt->rt_nh;
1807 
1808 		if (nh->nh_dev)
1809 			payload += nla_total_size(4); /* RTA_OIF */
1810 		if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */
1811 			payload += nla_total_size(2 + nh->nh_via_alen);
1812 		if (nh->nh_labels) /* RTA_NEWDST */
1813 			payload += nla_total_size(nh->nh_labels * 4);
1814 	} else {
1815 		/* each nexthop is packed in an attribute */
1816 		size_t nhsize = 0;
1817 
1818 		for_nexthops(rt) {
1819 			nhsize += nla_total_size(sizeof(struct rtnexthop));
1820 			/* RTA_VIA */
1821 			if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC)
1822 				nhsize += nla_total_size(2 + nh->nh_via_alen);
1823 			if (nh->nh_labels)
1824 				nhsize += nla_total_size(nh->nh_labels * 4);
1825 		} endfor_nexthops(rt);
1826 		/* nested attribute */
1827 		payload += nla_total_size(nhsize);
1828 	}
1829 
1830 	return payload;
1831 }
1832 
1833 static void rtmsg_lfib(int event, u32 label, struct mpls_route *rt,
1834 		       struct nlmsghdr *nlh, struct net *net, u32 portid,
1835 		       unsigned int nlm_flags)
1836 {
1837 	struct sk_buff *skb;
1838 	u32 seq = nlh ? nlh->nlmsg_seq : 0;
1839 	int err = -ENOBUFS;
1840 
1841 	skb = nlmsg_new(lfib_nlmsg_size(rt), GFP_KERNEL);
1842 	if (skb == NULL)
1843 		goto errout;
1844 
1845 	err = mpls_dump_route(skb, portid, seq, event, label, rt, nlm_flags);
1846 	if (err < 0) {
1847 		/* -EMSGSIZE implies BUG in lfib_nlmsg_size */
1848 		WARN_ON(err == -EMSGSIZE);
1849 		kfree_skb(skb);
1850 		goto errout;
1851 	}
1852 	rtnl_notify(skb, net, portid, RTNLGRP_MPLS_ROUTE, nlh, GFP_KERNEL);
1853 
1854 	return;
1855 errout:
1856 	if (err < 0)
1857 		rtnl_set_sk_err(net, RTNLGRP_MPLS_ROUTE, err);
1858 }
1859 
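/* Grow or shrink the per-namespace label table.  When the new limit
 * covers the reserved IPv4 explicit null (0) and IPv6 explicit null (2)
 * labels, kernel-owned routes towards the loopback device are installed
 * for them, so packets carrying an explicit null label are decapsulated
 * and delivered locally.
 */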
1860 static int resize_platform_label_table(struct net *net, size_t limit)
1861 {
1862 	size_t size = sizeof(struct mpls_route *) * limit;
1863 	size_t old_limit;
1864 	size_t cp_size;
1865 	struct mpls_route __rcu **labels = NULL, **old;
1866 	struct mpls_route *rt0 = NULL, *rt2 = NULL;
1867 	unsigned index;
1868 
1869 	if (size) {
1870 		labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
1871 		if (!labels)
1872 			labels = vzalloc(size);
1873 
1874 		if (!labels)
1875 			goto nolabels;
1876 	}
1877 
1878 	/* In case the predefined labels need to be populated */
1879 	if (limit > MPLS_LABEL_IPV4NULL) {
1880 		struct net_device *lo = net->loopback_dev;
1881 		rt0 = mpls_rt_alloc(1, lo->addr_len);
1882 		if (!rt0)
1883 			goto nort0;
1884 		RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
1885 		rt0->rt_protocol = RTPROT_KERNEL;
1886 		rt0->rt_payload_type = MPT_IPV4;
1887 		rt0->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
1888 		rt0->rt_nh->nh_via_alen = lo->addr_len;
1889 		memcpy(__mpls_nh_via(rt0, rt0->rt_nh), lo->dev_addr,
1890 		       lo->addr_len);
1891 	}
1892 	if (limit > MPLS_LABEL_IPV6NULL) {
1893 		struct net_device *lo = net->loopback_dev;
1894 		rt2 = mpls_rt_alloc(1, lo->addr_len);
1895 		if (!rt2)
1896 			goto nort2;
1897 		RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
1898 		rt2->rt_protocol = RTPROT_KERNEL;
1899 		rt2->rt_payload_type = MPT_IPV6;
1900 		rt2->rt_nh->nh_via_table = NEIGH_LINK_TABLE;
1901 		rt2->rt_nh->nh_via_alen = lo->addr_len;
1902 		memcpy(__mpls_nh_via(rt2, rt2->rt_nh), lo->dev_addr,
1903 		       lo->addr_len);
1904 	}
1905 
1906 	rtnl_lock();
1907 	/* Remember the original table */
1908 	old = rtnl_dereference(net->mpls.platform_label);
1909 	old_limit = net->mpls.platform_labels;
1910 
1911 	/* Free any labels beyond the new table */
1912 	for (index = limit; index < old_limit; index++)
1913 		mpls_route_update(net, index, NULL, NULL);
1914 
1915 	/* Copy over the old labels */
1916 	cp_size = size;
1917 	if (old_limit < limit)
1918 		cp_size = old_limit * sizeof(struct mpls_route *);
1919 
1920 	memcpy(labels, old, cp_size);
1921 
1922 	/* If needed set the predefined labels */
1923 	if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
1924 	    (limit > MPLS_LABEL_IPV6NULL)) {
1925 		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
1926 		rt2 = NULL;
1927 	}
1928 
1929 	if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
1930 	    (limit > MPLS_LABEL_IPV4NULL)) {
1931 		RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
1932 		rt0 = NULL;
1933 	}
1934 
1935 	/* Update the global pointers */
1936 	net->mpls.platform_labels = limit;
1937 	rcu_assign_pointer(net->mpls.platform_label, labels);
1938 
1939 	rtnl_unlock();
1940 
1941 	mpls_rt_free(rt2);
1942 	mpls_rt_free(rt0);
1943 
1944 	if (old) {
1945 		synchronize_rcu();
1946 		kvfree(old);
1947 	}
1948 	return 0;
1949 
1950 nort2:
1951 	mpls_rt_free(rt0);
1952 nort0:
1953 	kvfree(labels);
1954 nolabels:
1955 	return -ENOMEM;
1956 }
1957 
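/* proc handler for /proc/sys/net/mpls/platform_labels: clamps the value
 * to [0, 2^20 - 1] and resizes the per-namespace label table on write.
 */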
1958 static int mpls_platform_labels(struct ctl_table *table, int write,
1959 				void __user *buffer, size_t *lenp, loff_t *ppos)
1960 {
1961 	struct net *net = table->data;
1962 	int platform_labels = net->mpls.platform_labels;
1963 	int ret;
1964 	struct ctl_table tmp = {
1965 		.procname	= table->procname,
1966 		.data		= &platform_labels,
1967 		.maxlen		= sizeof(int),
1968 		.mode		= table->mode,
1969 		.extra1		= &zero,
1970 		.extra2		= &label_limit,
1971 	};
1972 
1973 	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
1974 
1975 	if (write && ret == 0)
1976 		ret = resize_platform_label_table(net, platform_labels);
1977 
1978 	return ret;
1979 }
1980 
1981 static const struct ctl_table mpls_table[] = {
1982 	{
1983 		.procname	= "platform_labels",
1984 		.data		= NULL,
1985 		.maxlen		= sizeof(int),
1986 		.mode		= 0644,
1987 		.proc_handler	= mpls_platform_labels,
1988 	},
1989 	{ }
1990 };
1991 
1992 static int mpls_net_init(struct net *net)
1993 {
1994 	struct ctl_table *table;
1995 
1996 	net->mpls.platform_labels = 0;
1997 	net->mpls.platform_label = NULL;
1998 
1999 	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
2000 	if (table == NULL)
2001 		return -ENOMEM;
2002 
2003 	table[0].data = net;
2004 	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
2005 	if (net->mpls.ctl == NULL) {
2006 		kfree(table);
2007 		return -ENOMEM;
2008 	}
2009 
2010 	return 0;
2011 }
2012 
2013 static void mpls_net_exit(struct net *net)
2014 {
2015 	struct mpls_route __rcu **platform_label;
2016 	size_t platform_labels;
2017 	struct ctl_table *table;
2018 	unsigned int index;
2019 
2020 	table = net->mpls.ctl->ctl_table_arg;
2021 	unregister_net_sysctl_table(net->mpls.ctl);
2022 	kfree(table);
2023 
2024 	/* An rcu grace period has passed since the last device in
2025 	 * the network namespace (and thus the last in flight packet)
2026 	 * left this network namespace.  This is because
2027 	 * unregister_netdevice_many and netdev_run_todo have completed
2028 	 * for each network device that was in this network namespace.
2029 	 *
2030 	 * As such no additional rcu synchronization is necessary when
2031 	 * freeing the platform_label table.
2032 	 */
2033 	rtnl_lock();
2034 	platform_label = rtnl_dereference(net->mpls.platform_label);
2035 	platform_labels = net->mpls.platform_labels;
2036 	for (index = 0; index < platform_labels; index++) {
2037 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
2038 		RCU_INIT_POINTER(platform_label[index], NULL);
2039 		mpls_notify_route(net, index, rt, NULL, NULL);
2040 		mpls_rt_free(rt);
2041 	}
2042 	rtnl_unlock();
2043 
2044 	kvfree(platform_label);
2045 }
2046 
2047 static struct pernet_operations mpls_net_ops = {
2048 	.init = mpls_net_init,
2049 	.exit = mpls_net_exit,
2050 };
2051 
2052 static struct rtnl_af_ops mpls_af_ops __read_mostly = {
2053 	.family		   = AF_MPLS,
2054 	.fill_stats_af	   = mpls_fill_stats_af,
2055 	.get_stats_af_size = mpls_get_stats_af_size,
2056 };
2057 
2058 static int __init mpls_init(void)
2059 {
2060 	int err;
2061 
2062 	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
2063 
2064 	err = register_pernet_subsys(&mpls_net_ops);
2065 	if (err)
2066 		goto out;
2067 
2068 	err = register_netdevice_notifier(&mpls_dev_notifier);
2069 	if (err)
2070 		goto out_unregister_pernet;
2071 
2072 	dev_add_pack(&mpls_packet_type);
2073 
2074 	rtnl_af_register(&mpls_af_ops);
2075 
2076 	rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
2077 	rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
2078 	rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
2079 	rtnl_register(PF_MPLS, RTM_GETNETCONF, mpls_netconf_get_devconf,
2080 		      mpls_netconf_dump_devconf, NULL);
2081 	err = 0;
2082 out:
2083 	return err;
2084 
2085 out_unregister_pernet:
2086 	unregister_pernet_subsys(&mpls_net_ops);
2087 	goto out;
2088 }
2089 module_init(mpls_init);
2090 
2091 static void __exit mpls_exit(void)
2092 {
2093 	rtnl_unregister_all(PF_MPLS);
2094 	rtnl_af_unregister(&mpls_af_ops);
2095 	dev_remove_pack(&mpls_packet_type);
2096 	unregister_netdevice_notifier(&mpls_dev_notifier);
2097 	unregister_pernet_subsys(&mpls_net_ops);
2098 }
2099 module_exit(mpls_exit);
2100 
2101 MODULE_DESCRIPTION("MultiProtocol Label Switching");
2102 MODULE_LICENSE("GPL v2");
2103 MODULE_ALIAS_NETPROTO(PF_MPLS);
2104