// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different payload types like
 * MPLS, NSH, IP, etc.
 * Copyright (c) 2019 Nokia, Inc.
 * Authors: Martin Varghese, <martin.varghese@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/protocol.h>
#include <net/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/bareudp.h>

#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-network namespace private data for this module */

static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head bareudp_list;
};

/* Pseudo network device */
struct bareudp_dev {
	struct net *net;		/* netns for packet i/o */
	struct net_device *dev;		/* netdev for bareudp tunnel */
	__be16 ethertype;
	__be16 port;
	u16 sport_min;
	bool multi_proto_mode;
	struct socket __rcu *sock;
	struct list_head next;		/* bareudp node on namespace list */
	struct gro_cells gro_cells;
};

static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		__u8 ipversion;

		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
				  sizeof(ipversion))) {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
		ipversion >>= 4;

		if (ipversion == 4) {
			proto = htons(ETH_P_IP);
		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
				ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
					 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);
	skb_reset_mac_header(skb);

	if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_sw_netstats_rx_add(bareudp->dev, len);

	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}

static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static int bareudp_init(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&bareudp->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}

static struct socket *bareudp_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
#if IS_ENABLED(CONFIG_IPV6)
	udp_conf.family = AF_INET6;
#else
	udp_conf.family = AF_INET;
#endif
	udp_conf.local_udp_port = port;
	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	udp_allow_gso(sock->sk);
	return sock;
}

/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}

static int bareudp_open(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int ret = 0;

	ret = bareudp_socket_create(bareudp, bareudp->port);
	return ret;
}

static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

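	/* Teardown order matters here: save the socket pointer, clear the
	 * RCU-protected reference so the encap receive path stops finding
	 * it, wait for in-flight readers, then release the socket.
	 */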
	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}

static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}

static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
				    IPPROTO_UDP, use_cache);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}

static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
	if (bareudp->ethertype == proto)
		return true;

	if (!bareudp->multi_proto_mode)
		return false;

	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
	    proto == htons(ETH_P_MPLS_MC))
		return true;

	if (bareudp->ethertype == htons(ETH_P_IP) &&
	    proto == htons(ETH_P_IPV6))
		return true;

	return false;
}

static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	rcu_read_lock();
	if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
					     USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}

static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init		= bareudp_init,
	.ndo_uninit		= bareudp_uninit,
	.ndo_open		= bareudp_open,
	.ndo_stop		= bareudp_stop,
	.ndo_start_xmit		= bareudp_xmit,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_fill_metadata_dst	= bareudp_fill_metadata_dst,
};

static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]		= { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	= { .type = NLA_U16 },
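	/* The two attributes below are optional: a lower bound for the
	 * flow-hashed UDP source port, and the multi-protocol mode flag.
	 */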
	[IFLA_BAREUDP_SRCPORT_MIN]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]	= { .type = NLA_FLAG },
};

/* Info for udev, that this is a virtual tunnel endpoint */
static const struct device_type bareudp_type = {
	.name = "bareudp",
};

/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->features |= NETIF_F_RXCSUM;
	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_FRAGLIST;
	dev->hw_features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}

static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}
	return 0;
}

static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
			struct netlink_ext_ack *extack)
{
	memset(conf, 0, sizeof(*conf));

	if (!data[IFLA_BAREUDP_PORT]) {
		NL_SET_ERR_MSG(extack, "port not specified");
		return -EINVAL;
	}
	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
		NL_SET_ERR_MSG(extack, "ethertype not specified");
		return -EINVAL;
	}

	if (data[IFLA_BAREUDP_PORT])
		conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);

	if (data[IFLA_BAREUDP_ETHERTYPE])
		conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);

	if (data[IFLA_BAREUDP_SRCPORT_MIN])
		conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);

	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
		conf->multi_proto_mode = true;

	return 0;
}

static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
					    const struct bareudp_conf *conf)
{
	struct bareudp_dev *bareudp, *t = NULL;

	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
		if (conf->port == bareudp->port)
			t = bareudp;
	}
	return t;
}

static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	t = bareudp_find_dev(bn, conf);
	if (t)
		return -EBUSY;

	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP)))
		return -EINVAL;

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}

static int bareudp_link_config(struct net_device *dev,
			       struct nlattr *tb[])
{
	int err;

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err)
			return err;
	}
	return 0;
}

static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}

static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		goto err_unconfig;

	return 0;

err_unconfig:
	bareudp_dellink(dev, NULL);
	return err;
}

static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}

static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind		= "bareudp",
	.maxtype	= IFLA_BAREUDP_MAX,
	.policy		= bareudp_policy,
	.priv_size	= sizeof(struct bareudp_dev),
	.setup		= bareudp_setup,
	.validate	= bareudp_validate,
	.newlink	= bareudp_newlink,
	.dellink	= bareudp_dellink,
	.get_size	= bareudp_get_size,
	.fill_info	= bareudp_fill_info,
};

struct net_device *bareudp_dev_create(struct net *net, const char *name,
				      u8 name_assign_type,
				      struct bareudp_conf *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &bareudp_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = bareudp_configure(net, dev, conf);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	bareudp_dellink(dev, NULL);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);

static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}

static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}

static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};

static int __init bareudp_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&bareudp_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&bareudp_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&bareudp_net_ops);
out1:
	return rc;
}
late_initcall(bareudp_init_module);

static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);

MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");
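
/*
 * Illustrative usage only (not part of the driver): with an iproute2 build
 * that supports bareudp, a device terminating MPLS-over-UDP on the IANA
 * port 6635 could be created roughly as follows; the device name and port
 * number here are arbitrary examples.
 *
 *   ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 *   ip link set dev bareudp0 up
 */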