xref: /openbmc/linux/drivers/net/bareudp.c (revision 61761f08e361609fd611d326b21408a058138a65)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Bareudp: UDP  tunnel encasulation for different Payload types like
3  * MPLS, NSH, IP, etc.
4  * Copyright (c) 2019 Nokia, Inc.
5  * Authors:  Martin Varghese, <martin.varghese@nokia.com>
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/etherdevice.h>
13 #include <linux/hash.h>
14 #include <net/dst_metadata.h>
15 #include <net/gro_cells.h>
16 #include <net/rtnetlink.h>
17 #include <net/protocol.h>
18 #include <net/ip6_tunnel.h>
19 #include <net/ip_tunnels.h>
20 #include <net/udp_tunnel.h>
21 #include <net/bareudp.h>
22 
23 #define BAREUDP_BASE_HLEN sizeof(struct udphdr)
24 #define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
25 			   sizeof(struct udphdr))
26 #define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
27 			   sizeof(struct udphdr))
28 
29 static bool log_ecn_error = true;
30 module_param(log_ecn_error, bool, 0644);
31 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
32 
/* per-network namespace private data for this module */

/* net_generic() id used to locate struct bareudp_net in each netns. */
static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head        bareudp_list;	/* all bareudp devices in this netns */
};
40 
/* Tunnel parameters parsed from the netlink attributes (see bareudp2info()). */
struct bareudp_conf {
	__be16 ethertype;	/* EtherType of the tunnelled payload */
	__be16 port;		/* UDP destination port of the tunnel */
	u16 sport_min;		/* lower bound for the hashed UDP source port */
	bool multi_proto_mode;	/* also carry the companion proto (IPv6 / MPLS-MC) */
};
47 
/* Pseudo network device */
struct bareudp_dev {
	struct net         *net;        /* netns for packet i/o */
	struct net_device  *dev;        /* netdev for bareudp tunnel */
	__be16		   ethertype;	/* configured payload EtherType */
	__be16             port;        /* UDP port the tunnel listens on */
	u16	           sport_min;   /* minimum UDP source port for tx */
	bool               multi_proto_mode;
	struct socket      __rcu *sock; /* listening socket, RCU-published */
	struct list_head   next;        /* bareudp node  on namespace list */
	struct gro_cells   gro_cells;   /* per-CPU GRO for the rx path */
};
60 
/* encap_rcv() handler: called by the UDP stack for every datagram that
 * arrives on the tunnel socket.  Determines the inner protocol from the
 * tunnel configuration (and, in multiproto mode, from the payload or
 * outer addresses), strips the outer headers, attaches rx tunnel
 * metadata and hands the packet to GRO.  Always returns 0; the skb is
 * consumed either way.
 */
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;
	int nh;

	/* sk_user_data was set to the bareudp_dev in bareudp_socket_create() */
	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	/* Address family of the *outer* header; used for the rx metadata
	 * and the ECN decapsulation below.
	 */
	if (skb->protocol ==  htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		__u8 ipversion;

		/* Peek at the first payload byte to learn the inner IP
		 * version without pulling the UDP header yet.
		 */
		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
				  sizeof(ipversion))) {
			DEV_STATS_INC(bareudp->dev, rx_dropped);
			goto drop;
		}
		ipversion >>= 4;

		if (ipversion == 4) {
			proto = htons(ETH_P_IP);
		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
			/* multiproto IPv4 tunnels also accept IPv6 */
			proto = htons(ETH_P_IPV6);
		} else {
			DEV_STATS_INC(bareudp->dev, rx_dropped);
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		/* MPLS payloads carry no version field of their own:
		 * distinguish unicast from multicast MPLS by whether the
		 * *outer* destination address is multicast.
		 */
		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				DEV_STATS_INC(bareudp->dev, rx_dropped);
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
			ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				DEV_STATS_INC(bareudp->dev, rx_dropped);
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	/* Strip the UDP header and set skb->protocol to the inner proto. */
	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
				 dev_net(bareudp->dev)))) {
		DEV_STATS_INC(bareudp->dev, rx_dropped);
		goto drop;
	}
	/* Build rx tunnel metadata from the outer headers. */
	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		DEV_STATS_INC(bareudp->dev, rx_dropped);
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	skb_reset_mac_header(skb);

	/* Save offset of outer header relative to skb->head,
	 * because we are going to reset the network header to the inner header
	 * and might change skb->head.
	 */
	nh = skb_network_header(skb) - skb->head;

	skb_reset_network_header(skb);

	if (!pskb_inet_may_pull(skb)) {
		DEV_STATS_INC(bareudp->dev, rx_length_errors);
		DEV_STATS_INC(bareudp->dev, rx_errors);
		goto drop;
	}

	/* Get the outer header. */
	oiph = skb->head + nh;

	/* Propagate ECN marks from the outer to the inner header. */
	if (!ipv6_mod_enabled() || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if  (!ipv6_mod_enabled() || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		/* err > 1: decapsulation error serious enough to drop */
		if (err > 1) {
			DEV_STATS_INC(bareudp->dev, rx_frame_errors);
			DEV_STATS_INC(bareudp->dev, rx_errors);
			goto drop;
		}
	}

	/* Account the length before GRO may merge/consume the skb. */
	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_sw_netstats_rx_add(bareudp->dev, len);

	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}
202 
/* encap_err_lookup() stub: bareudp keeps no per-flow state, so ICMP
 * errors cannot be matched to a tunnel flow; accept them unconditionally.
 */
static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
207 
208 static int bareudp_init(struct net_device *dev)
209 {
210 	struct bareudp_dev *bareudp = netdev_priv(dev);
211 	int err;
212 
213 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
214 	if (!dev->tstats)
215 		return -ENOMEM;
216 
217 	err = gro_cells_init(&bareudp->gro_cells, dev);
218 	if (err) {
219 		free_percpu(dev->tstats);
220 		return err;
221 	}
222 	return 0;
223 }
224 
/* ndo_uninit: release everything bareudp_init() allocated. */
static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}
232 
233 static struct socket *bareudp_create_sock(struct net *net, __be16 port)
234 {
235 	struct udp_port_cfg udp_conf;
236 	struct socket *sock;
237 	int err;
238 
239 	memset(&udp_conf, 0, sizeof(udp_conf));
240 
241 	if (ipv6_mod_enabled())
242 		udp_conf.family = AF_INET6;
243 	else
244 		udp_conf.family = AF_INET;
245 
246 	udp_conf.local_udp_port = port;
247 	/* Open UDP socket */
248 	err = udp_sock_create(net, &udp_conf, &sock);
249 	if (err < 0)
250 		return ERR_PTR(err);
251 
252 	udp_allow_gso(sock->sk);
253 	return sock;
254 }
255 
/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;	/* read back in bareudp_udp_encap_recv() */
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	/* Publish the socket; rx/tx paths read it with rcu_dereference(). */
	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}
278 
279 static int bareudp_open(struct net_device *dev)
280 {
281 	struct bareudp_dev *bareudp = netdev_priv(dev);
282 	int ret = 0;
283 
284 	ret =  bareudp_socket_create(bareudp, bareudp->port);
285 	return ret;
286 }
287 
/* Unpublish and release the tunnel socket.  synchronize_net() waits
 * for in-flight RCU readers (rx and xmit paths) to finish with the old
 * pointer before the socket is actually torn down.
 */
static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}
297 
/* ndo_stop: tear down the tunnel's listening socket. */
static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}
305 
/* Encapsulate @skb in UDP/IPv4 per the tx tunnel metadata in @info and
 * transmit it through the tunnel socket.  Runs under rcu_read_lock()
 * (see bareudp_xmit()).  Returns 0 on success or a negative errno; on
 * error the caller frees the skb.
 */
static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	/* Device is down (bareudp_sock_release() already ran). */
	if (!sock)
		return -ESHUTDOWN;

	/* Route the outer packet; also selects the outer source address. */
	rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
				    IPPROTO_UDP, use_cache);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	/* Flow-hash-derived source port, bounded below by sport_min. */
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	/* Headroom needed for link layer + outer IP + UDP + options. */
	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	/* Consumes the skb (and the route) on both success and failure. */
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}
368 
/* IPv6 counterpart of bareudp_xmit_skb(): encapsulate @skb in UDP/IPv6
 * and transmit it through the tunnel socket.  Runs under
 * rcu_read_lock().  Returns 0 on success or a negative errno; on error
 * the caller frees the skb.
 */
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock  = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	/* Device is down (bareudp_sock_release() already ran). */
	if (!sock)
		return -ESHUTDOWN;

	/* Route the outer packet; also selects the outer source address. */
	dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	/* Flow-hash-derived source port, bounded below by sport_min. */
	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	/* Headroom needed for link layer + outer IPv6 + UDP + options. */
	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	/* Consumes the skb (and the route) on both success and failure. */
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
430 
431 static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
432 {
433 	if (bareudp->ethertype == proto)
434 		return true;
435 
436 	if (!bareudp->multi_proto_mode)
437 		return false;
438 
439 	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
440 	    proto == htons(ETH_P_MPLS_MC))
441 		return true;
442 
443 	if (bareudp->ethertype == htons(ETH_P_IP) &&
444 	    proto == htons(ETH_P_IPV6))
445 		return true;
446 
447 	return false;
448 }
449 
/* ndo_start_xmit: entry point for packets sent on the bareudp netdev.
 * Packets must carry TX tunnel metadata (collect-md mode) and an
 * EtherType accepted by bareudp_proto_valid().  Always returns
 * NETDEV_TX_OK; on error the skb is freed and device stats updated.
 */
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	/* RCU protects bareudp->sock against bareudp_sock_release(). */
	rcu_read_lock();
	if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	/* Map the errno to the most descriptive stat counter. */
	if (err == -ELOOP)
		DEV_STATS_INC(dev, collisions);
	else if (err == -ENETUNREACH)
		DEV_STATS_INC(dev, tx_carrier_errors);

	DEV_STATS_INC(dev, tx_errors);
	return NETDEV_TX_OK;
}
488 
/* ndo_fill_metadata_dst: complete the tx tunnel metadata attached to
 * @skb with the outer source address and UDP ports that an actual
 * transmission would use, by performing the same route lookup as the
 * xmit path.  Returns 0 or a negative errno.
 */
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		/* Lookup only to learn the source address; drop the route. */
		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	/* Same source-port selection as the xmit path. */
	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
			USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}
532 
/* Netdev callbacks for the bareudp tunnel device. */
static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init               = bareudp_init,
	.ndo_uninit             = bareudp_uninit,
	.ndo_open               = bareudp_open,
	.ndo_stop               = bareudp_stop,
	.ndo_start_xmit         = bareudp_xmit,
	.ndo_get_stats64        = dev_get_tstats64,
	.ndo_fill_metadata_dst  = bareudp_fill_metadata_dst,
};
542 
/* Netlink attribute policy; the attributes are decoded in bareudp2info(). */
static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]                = { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	   = { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]         = { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]     = { .type = NLA_FLAG },
};
549 
/* Info for udev, that this is a virtual tunnel endpoint (DEVTYPE=bareudp) */
static const struct device_type bareudp_type = {
	.name = "bareudp",
};
554 
555 /* Initialize the device structure. */
556 static void bareudp_setup(struct net_device *dev)
557 {
558 	dev->netdev_ops = &bareudp_netdev_ops;
559 	dev->needs_free_netdev = true;
560 	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
561 	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
562 	dev->features    |= NETIF_F_RXCSUM;
563 	dev->features    |= NETIF_F_LLTX;
564 	dev->features    |= NETIF_F_GSO_SOFTWARE;
565 	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
566 	dev->hw_features |= NETIF_F_RXCSUM;
567 	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
568 	dev->hard_header_len = 0;
569 	dev->addr_len = 0;
570 	dev->mtu = ETH_DATA_LEN;
571 	dev->min_mtu = IPV4_MIN_MTU;
572 	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
573 	dev->type = ARPHRD_NONE;
574 	netif_keep_dst(dev);
575 	dev->priv_flags |= IFF_NO_QUEUE;
576 	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
577 }
578 
579 static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
580 			    struct netlink_ext_ack *extack)
581 {
582 	if (!data) {
583 		NL_SET_ERR_MSG(extack,
584 			       "Not enough attributes provided to perform the operation");
585 		return -EINVAL;
586 	}
587 	return 0;
588 }
589 
590 static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
591 			struct netlink_ext_ack *extack)
592 {
593 	memset(conf, 0, sizeof(*conf));
594 
595 	if (!data[IFLA_BAREUDP_PORT]) {
596 		NL_SET_ERR_MSG(extack, "port not specified");
597 		return -EINVAL;
598 	}
599 	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
600 		NL_SET_ERR_MSG(extack, "ethertype not specified");
601 		return -EINVAL;
602 	}
603 
604 	conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
605 	conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
606 
607 	if (data[IFLA_BAREUDP_SRCPORT_MIN])
608 		conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
609 
610 	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
611 		conf->multi_proto_mode = true;
612 
613 	return 0;
614 }
615 
616 static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
617 					    const struct bareudp_conf *conf)
618 {
619 	struct bareudp_dev *bareudp, *t = NULL;
620 
621 	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
622 		if (conf->port == bareudp->port)
623 			t = bareudp;
624 	}
625 	return t;
626 }
627 
/* Apply @conf to a freshly allocated bareudp netdev, register it and
 * link it into the per-netns device list.  Called from
 * bareudp_newlink().  Returns 0 or a negative errno with an extack
 * message.
 */
static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf,
			     struct netlink_ext_ack *extack)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	/* Tunnel UDP ports must be unique within a netns. */
	t = bareudp_find_dev(bn, conf);
	if (t) {
		NL_SET_ERR_MSG(extack, "Another bareudp device using the same port already exists");
		return -EBUSY;
	}

	/* Multiproto only makes sense for IPv4 (adds IPv6) and
	 * unicast MPLS (adds multicast MPLS).
	 */
	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP))) {
		NL_SET_ERR_MSG(extack, "Cannot set multiproto mode for this ethertype (only IPv4 and unicast MPLS are supported)");
		return -EINVAL;
	}

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}
663 
664 static int bareudp_link_config(struct net_device *dev,
665 			       struct nlattr *tb[])
666 {
667 	int err;
668 
669 	if (tb[IFLA_MTU]) {
670 		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
671 		if (err)
672 			return err;
673 	}
674 	return 0;
675 }
676 
/* rtnl dellink callback: unlink from the per-netns list and queue the
 * netdev for unregistration (@head may be NULL for immediate removal).
 */
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}
684 
/* rtnl newlink callback: parse the netlink attributes, configure and
 * register the device, then apply generic link settings.  On failure of
 * the last step, undo the registration.
 */
static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf, extack);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		goto err_unconfig;

	return 0;

err_unconfig:
	bareudp_dellink(dev, NULL);
	return err;
}
710 
/* Upper bound on the netlink attribute space bareudp_fill_info() needs. */
static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}
719 
/* rtnl fill_info callback: dump the device configuration as
 * IFLA_BAREUDP_* attributes.  Returns 0 or -EMSGSIZE.
 */
static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	/* Flag attribute: only emitted when the mode is enabled. */
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
739 
/* rtnetlink ops: hooks "ip link ... type bareudp" into this driver. */
static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind           = "bareudp",
	.maxtype        = IFLA_BAREUDP_MAX,
	.policy         = bareudp_policy,
	.priv_size      = sizeof(struct bareudp_dev),
	.setup          = bareudp_setup,
	.validate       = bareudp_validate,
	.newlink        = bareudp_newlink,
	.dellink        = bareudp_dellink,
	.get_size       = bareudp_get_size,
	.fill_info      = bareudp_fill_info,
};
752 
/* Per-netns init: start with an empty bareudp device list. */
static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}
760 
/* Queue every bareudp device of @net on @head for batched
 * unregistration (see bareudp_exit_batch_net()).
 */
static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}
769 
/* Batched netns exit: collect all devices from every dying netns and
 * unregister them with a single rtnl transaction.
 */
static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
783 
/* Per-network-namespace lifecycle hooks for this module. */
static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};
790 
791 static int __init bareudp_init_module(void)
792 {
793 	int rc;
794 
795 	rc = register_pernet_subsys(&bareudp_net_ops);
796 	if (rc)
797 		goto out1;
798 
799 	rc = rtnl_link_register(&bareudp_link_ops);
800 	if (rc)
801 		goto out2;
802 
803 	return 0;
804 out2:
805 	unregister_pernet_subsys(&bareudp_net_ops);
806 out1:
807 	return rc;
808 }
809 late_initcall(bareudp_init_module);
810 
/* Module exit: unregister in the reverse order of bareudp_init_module(). */
static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);
817 
818 MODULE_ALIAS_RTNL_LINK("bareudp");
819 MODULE_LICENSE("GPL");
820 MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
821 MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
822