/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
			    unsigned int len, bool success, bool mcast)
{
	if (!ipvlan)
		return;

	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}

static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
		if (is_v6 && addr->atype == IPVL_IPV6 &&
		    ipv6_addr_equal(&addr->ip6addr, iaddr))
			return addr;
		else if (!is_v6 && addr->atype == IPVL_IPV4 &&
			 addr->ip4addr.s_addr ==
				((struct in_addr *)iaddr)->s_addr)
			return addr;
	}
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
{
	hlist_del_rcu(&addr->hlnode);
	if (sync)
		synchronize_rcu();
}

bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
	struct ipvl_port *port = ipvlan->port;
	struct ipvl_addr *addr;

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if ((is_v6 && addr->atype == IPVL_IPV6 &&
		     ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
		    (!is_v6 && addr->atype == IPVL_IPV4 &&
		     addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
			return true;
	}

	if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
		return true;

	return false;
}

static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			*type = IPVL_ICMPV6;
			lyr3h = ip6h + 1;
		}
		break;
	}
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr + 2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
				   const struct ipvl_dev *in_dev, bool local)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_dev *ipvlan;
	struct sk_buff *nskb;
	unsigned int len;
	unsigned int mac_hash;
	int ret;

	if (skb->protocol == htons(ETH_P_PAUSE))
		return;

	list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
		if (local && (ipvlan == in_dev))
			continue;

		mac_hash = ipvlan_mac_hash(eth->h_dest);
		if (!test_bit(mac_hash, ipvlan->mac_filters))
			continue;

		ret = NET_RX_DROP;
		len = skb->len + ETH_HLEN;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			goto mcast_acct;

		if (ether_addr_equal(eth->h_dest, ipvlan->phy_dev->broadcast))
			nskb->pkt_type = PACKET_BROADCAST;
		else
			nskb->pkt_type = PACKET_MULTICAST;

		nskb->dev = ipvlan->dev;
		if (local)
			ret = dev_forward_skb(ipvlan->dev, nskb);
		else
			ret = netif_rx(nskb);
mcast_acct:
		ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
	}

	/* Locally generated? ...Forward a copy to the main-device as
	 * well.
	 * On the RX side we'll ignore it (won't give it to any
	 * of the virtual devices).
	 */
	if (local) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb) {
			if (ether_addr_equal(eth->h_dest, port->dev->broadcast))
				nskb->pkt_type = PACKET_BROADCAST;
			else
				nskb->pkt_type = PACKET_MULTICAST;

			dev_forward_skb(port->dev, nskb);
		}
	}
}

static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;

	len = skb->len + ETH_HLEN;
	if (unlikely(!(dev->flags & IFF_UP))) {
		kfree_skb(skb);
		goto out;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;

	skb->dev = dev;
	skb->pkt_type = PACKET_HOST;

	if (local) {
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
					    void *lyr3h, int addr_type,
					    bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	if (addr_type == IPVL_IPV6) {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
	} else if (addr_type == IPVL_ICMPV6) {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
	} else if (addr_type == IPVL_IPV4) {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ?
			 &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
	} else if (addr_type == IPVL_ARP) {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
	}

	return addr;
}

static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->iflink,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(dev_net(dev), &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);
	err = ip6_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

static int ipvlan_process_outbound(struct sk_buff *skb,
				   const struct ipvl_dev *ipvlan)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_warn_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * still carry an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
	if (addr)
		return ipvlan_rcv_frame(addr, skb, true);

out:
	skb->dev = ipvlan->phy_dev;
	return ipvlan_process_outbound(skb, ipvlan);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h,
						  addr_type, true);
			if (addr)
				return ipvlan_rcv_frame(addr, skb, true);
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the destination is local. So forward
		 * the skb to the main device. On the RX side we just return
		 * RX_HANDLER_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		u8 ip_summed = skb->ip_summed;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		ipvlan_multicast_frame(ipvlan->port, skb, ipvlan, true);
		skb->ip_summed = ip_summed;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
		  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, skb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;
	void *lyr3h;
	int addr_type;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port))
			ipvlan_multicast_frame(port, skb, NULL, false);
	} else {
		struct ipvl_addr *addr;

		lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
		if (!lyr3h)
			return ret;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
		if (addr)
			ret = ipvlan_rcv_frame(addr, skb, false);
	}

	return ret;
}

rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
		  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}