1 /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com> 2 * 3 * This program is free software; you can redistribute it and/or 4 * modify it under the terms of the GNU General Public License as 5 * published by the Free Software Foundation; either version 2 of 6 * the License, or (at your option) any later version. 7 * 8 */ 9 10 #include "ipvlan.h" 11 12 static u32 ipvlan_jhash_secret __read_mostly; 13 14 void ipvlan_init_secret(void) 15 { 16 net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret)); 17 } 18 19 static void ipvlan_count_rx(const struct ipvl_dev *ipvlan, 20 unsigned int len, bool success, bool mcast) 21 { 22 if (!ipvlan) 23 return; 24 25 if (likely(success)) { 26 struct ipvl_pcpu_stats *pcptr; 27 28 pcptr = this_cpu_ptr(ipvlan->pcpu_stats); 29 u64_stats_update_begin(&pcptr->syncp); 30 pcptr->rx_pkts++; 31 pcptr->rx_bytes += len; 32 if (mcast) 33 pcptr->rx_mcast++; 34 u64_stats_update_end(&pcptr->syncp); 35 } else { 36 this_cpu_inc(ipvlan->pcpu_stats->rx_errs); 37 } 38 } 39 40 static u8 ipvlan_get_v6_hash(const void *iaddr) 41 { 42 const struct in6_addr *ip6_addr = iaddr; 43 44 return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) & 45 IPVLAN_HASH_MASK; 46 } 47 48 static u8 ipvlan_get_v4_hash(const void *iaddr) 49 { 50 const struct in_addr *ip4_addr = iaddr; 51 52 return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) & 53 IPVLAN_HASH_MASK; 54 } 55 56 struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 57 const void *iaddr, bool is_v6) 58 { 59 struct ipvl_addr *addr; 60 u8 hash; 61 62 hash = is_v6 ? ipvlan_get_v6_hash(iaddr) : 63 ipvlan_get_v4_hash(iaddr); 64 hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) { 65 if (is_v6 && addr->atype == IPVL_IPV6 && 66 ipv6_addr_equal(&addr->ip6addr, iaddr)) 67 return addr; 68 else if (!is_v6 && addr->atype == IPVL_IPV4 && 69 addr->ip4addr.s_addr == 70 ((struct in_addr *)iaddr)->s_addr) 71 return addr; 72 } 73 return NULL; 74 } 75 76 void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) 77 { 78 struct ipvl_port *port = ipvlan->port; 79 u8 hash; 80 81 hash = (addr->atype == IPVL_IPV6) ? 82 ipvlan_get_v6_hash(&addr->ip6addr) : 83 ipvlan_get_v4_hash(&addr->ip4addr); 84 if (hlist_unhashed(&addr->hlnode)) 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 86 } 87 88 void ipvlan_ht_addr_del(struct ipvl_addr *addr) 89 { 90 hlist_del_init_rcu(&addr->hlnode); 91 } 92 93 struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, 94 const void *iaddr, bool is_v6) 95 { 96 struct ipvl_addr *addr; 97 98 list_for_each_entry(addr, &ipvlan->addrs, anode) { 99 if ((is_v6 && addr->atype == IPVL_IPV6 && 100 ipv6_addr_equal(&addr->ip6addr, iaddr)) || 101 (!is_v6 && addr->atype == IPVL_IPV4 && 102 addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr)) 103 return addr; 104 } 105 return NULL; 106 } 107 108 bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) 109 { 110 struct ipvl_dev *ipvlan; 111 112 ASSERT_RTNL(); 113 114 list_for_each_entry(ipvlan, &port->ipvlans, pnode) { 115 if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) 116 return true; 117 } 118 return false; 119 } 120 121 static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) 122 { 123 void *lyr3h = NULL; 124 125 switch (skb->protocol) { 126 case htons(ETH_P_ARP): { 127 struct arphdr *arph; 128 129 if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) 130 return NULL; 131 132 arph = arp_hdr(skb); 133 *type = IPVL_ARP; 134 lyr3h = arph; 135 break; 136 } 137 case htons(ETH_P_IP): { 138 u32 pktlen; 139 struct iphdr *ip4h; 140 141 if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h)))) 142 return NULL; 143 144 ip4h = ip_hdr(skb); 145 pktlen = ntohs(ip4h->tot_len); 146 if (ip4h->ihl < 5 || ip4h->version != 4) 147 return NULL; 148 if (skb->len < pktlen || pktlen < (ip4h->ihl * 4)) 149 return NULL; 150 151 *type = IPVL_IPV4; 152 lyr3h = ip4h; 153 break; 154 } 155 case htons(ETH_P_IPV6): { 156 struct ipv6hdr *ip6h; 157 158 if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h)))) 159 return NULL; 160 161 ip6h = ipv6_hdr(skb); 162 if (ip6h->version != 6) 163 return NULL; 164 165 *type = IPVL_IPV6; 166 lyr3h = ip6h; 167 /* Only Neighbour Solicitation pkts need different treatment */ 168 if (ipv6_addr_any(&ip6h->saddr) && 169 ip6h->nexthdr == NEXTHDR_ICMP) { 170 *type = IPVL_ICMPV6; 171 lyr3h = ip6h + 1; 172 } 173 break; 174 } 175 default: 176 return NULL; 177 } 178 179 return lyr3h; 180 } 181 182 unsigned int ipvlan_mac_hash(const unsigned char *addr) 183 { 184 u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2), 185 ipvlan_jhash_secret); 186 187 return hash & IPVLAN_MAC_FILTER_MASK; 188 } 189 190 void ipvlan_process_multicast(struct work_struct *work) 191 { 192 struct ipvl_port *port = container_of(work, struct ipvl_port, wq); 193 struct ethhdr *ethh; 194 struct ipvl_dev *ipvlan; 195 struct sk_buff *skb, *nskb; 196 struct sk_buff_head list; 197 unsigned int len; 198 unsigned int mac_hash; 199 int ret; 200 u8 pkt_type; 201 bool hlocal, dlocal; 202 203 __skb_queue_head_init(&list); 204 205 spin_lock_bh(&port->backlog.lock); 206 skb_queue_splice_tail_init(&port->backlog, &list); 207 spin_unlock_bh(&port->backlog.lock); 208 209 while ((skb = __skb_dequeue(&list)) != NULL) { 210 ethh = eth_hdr(skb); 211 hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr); 212 mac_hash = ipvlan_mac_hash(ethh->h_dest); 213 214 if (ether_addr_equal(ethh->h_dest, port->dev->broadcast)) 215 pkt_type = PACKET_BROADCAST; 216 else 217 pkt_type = PACKET_MULTICAST; 218 219 dlocal = false; 220 rcu_read_lock(); 221 list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { 222 if (hlocal && (ipvlan->dev == skb->dev)) { 223 dlocal = true; 224 continue; 225 } 226 if (!test_bit(mac_hash, ipvlan->mac_filters)) 227 continue; 228 229 ret = NET_RX_DROP; 230 len = skb->len + ETH_HLEN; 231 nskb = skb_clone(skb, GFP_ATOMIC); 232 if (!nskb) 233 goto acct; 234 235 nskb->pkt_type = pkt_type; 236 nskb->dev = ipvlan->dev; 237 if (hlocal) 238 ret = dev_forward_skb(ipvlan->dev, nskb); 239 else 240 ret = netif_rx(nskb); 241 acct: 242 ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); 243 } 244 rcu_read_unlock(); 245 246 if (dlocal) { 247 /* If the packet originated here, send it out. */ 248 skb->dev = port->dev; 249 skb->pkt_type = pkt_type; 250 dev_queue_xmit(skb); 251 } else { 252 kfree_skb(skb); 253 } 254 } 255 } 256 257 static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb, 258 bool local) 259 { 260 struct ipvl_dev *ipvlan = addr->master; 261 struct net_device *dev = ipvlan->dev; 262 unsigned int len; 263 rx_handler_result_t ret = RX_HANDLER_CONSUMED; 264 bool success = false; 265 266 len = skb->len + ETH_HLEN; 267 if (unlikely(!(dev->flags & IFF_UP))) { 268 kfree_skb(skb); 269 goto out; 270 } 271 272 skb = skb_share_check(skb, GFP_ATOMIC); 273 if (!skb) 274 goto out; 275 276 skb->dev = dev; 277 skb->pkt_type = PACKET_HOST; 278 279 if (local) { 280 if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) 281 success = true; 282 } else { 283 ret = RX_HANDLER_ANOTHER; 284 success = true; 285 } 286 287 out: 288 ipvlan_count_rx(ipvlan, len, success, false); 289 return ret; 290 } 291 292 static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, 293 void *lyr3h, int addr_type, 294 bool use_dest) 295 { 296 struct ipvl_addr *addr = NULL; 297 298 if (addr_type == IPVL_IPV6) { 299 struct ipv6hdr *ip6h; 300 struct in6_addr *i6addr; 301 302 ip6h = (struct ipv6hdr *)lyr3h; 303 i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr; 304 addr = ipvlan_ht_addr_lookup(port, i6addr, true); 305 } else if (addr_type == IPVL_ICMPV6) { 306 struct nd_msg *ndmh; 307 struct in6_addr *i6addr; 308 309 /* Make sure that the NeighborSolicitation ICMPv6 packets 310 * are handled to avoid DAD issue. 311 */ 312 ndmh = (struct nd_msg *)lyr3h; 313 if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { 314 i6addr = &ndmh->target; 315 addr = ipvlan_ht_addr_lookup(port, i6addr, true); 316 } 317 } else if (addr_type == IPVL_IPV4) { 318 struct iphdr *ip4h; 319 __be32 *i4addr; 320 321 ip4h = (struct iphdr *)lyr3h; 322 i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr; 323 addr = ipvlan_ht_addr_lookup(port, i4addr, false); 324 } else if (addr_type == IPVL_ARP) { 325 struct arphdr *arph; 326 unsigned char *arp_ptr; 327 __be32 dip; 328 329 arph = (struct arphdr *)lyr3h; 330 arp_ptr = (unsigned char *)(arph + 1); 331 if (use_dest) 332 arp_ptr += (2 * port->dev->addr_len) + 4; 333 else 334 arp_ptr += port->dev->addr_len; 335 336 memcpy(&dip, arp_ptr, 4); 337 addr = ipvlan_ht_addr_lookup(port, &dip, false); 338 } 339 340 return addr; 341 } 342 343 static int ipvlan_process_v4_outbound(struct sk_buff *skb) 344 { 345 const struct iphdr *ip4h = ip_hdr(skb); 346 struct net_device *dev = skb->dev; 347 struct rtable *rt; 348 int err, ret = NET_XMIT_DROP; 349 struct flowi4 fl4 = { 350 .flowi4_oif = dev_get_iflink(dev), 351 .flowi4_tos = RT_TOS(ip4h->tos), 352 .flowi4_flags = FLOWI_FLAG_ANYSRC, 353 .daddr = ip4h->daddr, 354 .saddr = ip4h->saddr, 355 }; 356 357 rt = ip_route_output_flow(dev_net(dev), &fl4, NULL); 358 if (IS_ERR(rt)) 359 goto err; 360 361 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { 362 ip_rt_put(rt); 363 goto err; 364 } 365 skb_dst_drop(skb); 366 skb_dst_set(skb, &rt->dst); 367 err = ip_local_out(skb); 368 if (unlikely(net_xmit_eval(err))) 369 dev->stats.tx_errors++; 370 else 371 ret = NET_XMIT_SUCCESS; 372 goto out; 373 err: 374 dev->stats.tx_errors++; 375 kfree_skb(skb); 376 out: 377 return ret; 378 } 379 380 static int ipvlan_process_v6_outbound(struct sk_buff *skb) 381 { 382 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 383 struct net_device *dev = skb->dev; 384 struct dst_entry *dst; 385 int err, ret = NET_XMIT_DROP; 386 struct flowi6 fl6 = { 387 .flowi6_iif = skb->dev->ifindex, 388 .daddr = ip6h->daddr, 389 .saddr = ip6h->saddr, 390 .flowi6_flags = FLOWI_FLAG_ANYSRC, 391 .flowlabel = ip6_flowinfo(ip6h), 392 .flowi6_mark = skb->mark, 393 .flowi6_proto = ip6h->nexthdr, 394 }; 395 396 dst = ip6_route_output(dev_net(dev), NULL, &fl6); 397 if (dst->error) { 398 ret = dst->error; 399 dst_release(dst); 400 goto err; 401 } 402 skb_dst_drop(skb); 403 skb_dst_set(skb, dst); 404 err = ip6_local_out(skb); 405 if (unlikely(net_xmit_eval(err))) 406 dev->stats.tx_errors++; 407 else 408 ret = NET_XMIT_SUCCESS; 409 goto out; 410 err: 411 dev->stats.tx_errors++; 412 kfree_skb(skb); 413 out: 414 return ret; 415 } 416 417 static int ipvlan_process_outbound(struct sk_buff *skb, 418 const struct ipvl_dev *ipvlan) 419 { 420 struct ethhdr *ethh = eth_hdr(skb); 421 int ret = NET_XMIT_DROP; 422 423 /* In this mode we dont care about multicast and broadcast traffic */ 424 if (is_multicast_ether_addr(ethh->h_dest)) { 425 pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n", 426 ntohs(skb->protocol)); 427 kfree_skb(skb); 428 goto out; 429 } 430 431 /* The ipvlan is a pseudo-L2 device, so the packets that we receive 432 * will have L2; which need to discarded and processed further 433 * in the net-ns of the main-device. 434 */ 435 if (skb_mac_header_was_set(skb)) { 436 skb_pull(skb, sizeof(*ethh)); 437 skb->mac_header = (typeof(skb->mac_header))~0U; 438 skb_reset_network_header(skb); 439 } 440 441 if (skb->protocol == htons(ETH_P_IPV6)) 442 ret = ipvlan_process_v6_outbound(skb); 443 else if (skb->protocol == htons(ETH_P_IP)) 444 ret = ipvlan_process_v4_outbound(skb); 445 else { 446 pr_warn_ratelimited("Dropped outbound packet type=%x\n", 447 ntohs(skb->protocol)); 448 kfree_skb(skb); 449 } 450 out: 451 return ret; 452 } 453 454 static void ipvlan_multicast_enqueue(struct ipvl_port *port, 455 struct sk_buff *skb) 456 { 457 if (skb->protocol == htons(ETH_P_PAUSE)) { 458 kfree_skb(skb); 459 return; 460 } 461 462 spin_lock(&port->backlog.lock); 463 if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { 464 __skb_queue_tail(&port->backlog, skb); 465 spin_unlock(&port->backlog.lock); 466 schedule_work(&port->wq); 467 } else { 468 spin_unlock(&port->backlog.lock); 469 atomic_long_inc(&skb->dev->rx_dropped); 470 kfree_skb(skb); 471 } 472 } 473 474 static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) 475 { 476 const struct ipvl_dev *ipvlan = netdev_priv(dev); 477 void *lyr3h; 478 struct ipvl_addr *addr; 479 int addr_type; 480 481 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); 482 if (!lyr3h) 483 goto out; 484 485 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 486 if (addr) 487 return ipvlan_rcv_frame(addr, skb, true); 488 489 out: 490 skb->dev = ipvlan->phy_dev; 491 return ipvlan_process_outbound(skb, ipvlan); 492 } 493 494 static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) 495 { 496 const struct ipvl_dev *ipvlan = netdev_priv(dev); 497 struct ethhdr *eth = eth_hdr(skb); 498 struct ipvl_addr *addr; 499 void *lyr3h; 500 int addr_type; 501 502 if (ether_addr_equal(eth->h_dest, eth->h_source)) { 503 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); 504 if (lyr3h) { 505 addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); 506 if (addr) 507 return ipvlan_rcv_frame(addr, skb, true); 508 } 509 skb = skb_share_check(skb, GFP_ATOMIC); 510 if (!skb) 511 return NET_XMIT_DROP; 512 513 /* Packet definitely does not belong to any of the 514 * virtual devices, but the dest is local. So forward 515 * the skb for the main-dev. At the RX side we just return 516 * RX_PASS for it to be processed further on the stack. 517 */ 518 return dev_forward_skb(ipvlan->phy_dev, skb); 519 520 } else if (is_multicast_ether_addr(eth->h_dest)) { 521 ipvlan_multicast_enqueue(ipvlan->port, skb); 522 return NET_XMIT_SUCCESS; 523 } 524 525 skb->dev = ipvlan->phy_dev; 526 return dev_queue_xmit(skb); 527 } 528 529 int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 530 { 531 struct ipvl_dev *ipvlan = netdev_priv(dev); 532 struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); 533 534 if (!port) 535 goto out; 536 537 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) 538 goto out; 539 540 switch(port->mode) { 541 case IPVLAN_MODE_L2: 542 return ipvlan_xmit_mode_l2(skb, dev); 543 case IPVLAN_MODE_L3: 544 return ipvlan_xmit_mode_l3(skb, dev); 545 } 546 547 /* Should not reach here */ 548 WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n", 549 port->mode); 550 out: 551 kfree_skb(skb); 552 return NET_XMIT_DROP; 553 } 554 555 static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) 556 { 557 struct ethhdr *eth = eth_hdr(skb); 558 struct ipvl_addr *addr; 559 void *lyr3h; 560 int addr_type; 561 562 if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { 563 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); 564 if (!lyr3h) 565 return true; 566 567 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false); 568 if (addr) 569 return false; 570 } 571 572 return true; 573 } 574 575 static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, 576 struct ipvl_port *port) 577 { 578 void *lyr3h; 579 int addr_type; 580 struct ipvl_addr *addr; 581 struct sk_buff *skb = *pskb; 582 rx_handler_result_t ret = RX_HANDLER_PASS; 583 584 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); 585 if (!lyr3h) 586 goto out; 587 588 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 589 if (addr) 590 ret = ipvlan_rcv_frame(addr, skb, false); 591 592 out: 593 return ret; 594 } 595 596 static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, 597 struct ipvl_port *port) 598 { 599 struct sk_buff *skb = *pskb; 600 struct ethhdr *eth = eth_hdr(skb); 601 rx_handler_result_t ret = RX_HANDLER_PASS; 602 void *lyr3h; 603 int addr_type; 604 605 if (is_multicast_ether_addr(eth->h_dest)) { 606 if (ipvlan_external_frame(skb, port)) { 607 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); 608 609 /* External frames are queued for device local 610 * distribution, but a copy is given to master 611 * straight away to avoid sending duplicates later 612 * when work-queue processes this frame. This is 613 * achieved by returning RX_HANDLER_PASS. 614 */ 615 if (nskb) 616 ipvlan_multicast_enqueue(port, nskb); 617 } 618 } else { 619 struct ipvl_addr *addr; 620 621 lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); 622 if (!lyr3h) 623 return ret; 624 625 addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true); 626 if (addr) 627 ret = ipvlan_rcv_frame(addr, skb, false); 628 } 629 630 return ret; 631 } 632 633 rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb) 634 { 635 struct sk_buff *skb = *pskb; 636 struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev); 637 638 if (!port) 639 return RX_HANDLER_PASS; 640 641 switch (port->mode) { 642 case IPVLAN_MODE_L2: 643 return ipvlan_handle_mode_l2(pskb, port); 644 case IPVLAN_MODE_L3: 645 return ipvlan_handle_mode_l3(pskb, port); 646 } 647 648 /* Should not reach here */ 649 WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n", 650 port->mode); 651 kfree_skb(skb); 652 return NET_RX_DROP; 653 } 654