// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>
#include <net/nsh.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec64 cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts64(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
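
/* Descriptive note (added): TCP flags in network byte order, taken from the
 * 16-bit word of the TCP header that holds the data offset and flag bits;
 * the 0x0FFF mask drops the data-offset nibble.
 */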
#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct sw_flow_stats *stats;
	unsigned int cpu = smp_processor_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[cpu]);

	/* Check if already have CPU-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
			flow->stats_last_writer = cpu;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current CPU is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != cpu)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If CPU-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != -1) &&
			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
				/* Try to allocate CPU-specific stats. */
				struct sw_flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      numa_node_id());
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[cpu],
							   new_stats);
					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
					goto unlock;
				}
			}
			flow->stats_last_writer = cpu;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int cpu;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int cpu;

	/* We open code this to make sure cpu 0 is always considered */
	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
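
/* Descriptive note (added): fill in the IPv6 portion of @key from the IPv6
 * header and any extension headers, and set the skb's transport header
 * offset.  Returns the combined length of the IPv6 header and extension
 * headers, 0 for a non-first fragment, or a negative errno value on error.
 */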
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned short frag_off;
	unsigned int payload_ofs = 0;
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	struct ipv6hdr *nh;
	int err, nexthdr, flags = 0;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (flags & IP6_FH_F_FRAG) {
		if (frag_off) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			key->ip.proto = nexthdr;
			return 0;
		}
		key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_find_hdr() as it
	 * always sets flags and frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * parse_vlan_tag - Parse vlan tag from vlan header.
 * @skb: skb containing frame to parse
 * @key_vh: pointer to parsed vlan tag
 * @untag_vlan: should the vlan header be removed from the frame
 *
 * Returns ERROR on memory error.
 * Returns 0 if it encounters a non-vlan or incomplete packet.
 * Returns 1 after successfully parsing vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
			  bool untag_vlan)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
	key_vh->tpid = vh->tpid;

	if (unlikely(untag_vlan)) {
		int offset = skb->data - skb_mac_header(skb);
		u16 tci;
		int err;

		__skb_push(skb, offset);
		err = __skb_vlan_pop(skb, &tci);
		__skb_pull(skb, offset);
		if (err)
			return err;
		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
	} else {
		__skb_pull(skb, sizeof(struct vlan_head));
	}
	return 1;
}

static void clear_vlan(struct sw_flow_key *key)
{
	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;
}
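
/* Descriptive note (added): populate the outer VLAN tag in @key from the
 * hardware-accelerated tag or from the packet itself (untagging the frame in
 * the latter case), then parse an inner (customer) VLAN tag if one is
 * present.  Returns 0 on success or a negative errno value on error.
 */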
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	if (skb_vlan_tag_present(skb)) {
		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan, true);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
	if (res <= 0)
		return res;

	return 0;
}

static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}
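
/* Descriptive note (added): fill in the transport-layer portion of @key from
 * an ICMPv6 header.  For neighbour solicitation/advertisement messages the
 * target address and any source/target link-layer address options are
 * extracted as well.  Returns 0 on success, or -ENOMEM if the skb cannot be
 * linearized.
 */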
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}
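
/* Descriptive note (added): extract the NSH base header into @key.  MD type 1
 * context headers are copied; MD type 2 metadata is not parsed and the
 * context fields are cleared.  Returns 0 on success or a negative errno value
 * for a truncated, malformed or unsupported header.
 */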
static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct nshhdr *nh;
	unsigned int nh_ofs = skb_network_offset(skb);
	u8 version, length;
	int err;

	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	version = nsh_get_ver(nh);
	length = nsh_hdr_len(nh);

	if (version != 0)
		return -EINVAL;

	err = check_header(skb, nh_ofs + length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	key->nsh.base.flags = nsh_get_flags(nh);
	key->nsh.base.ttl = nsh_get_ttl(nh);
	key->nsh.base.mdtype = nh->mdtype;
	key->nsh.base.np = nh->np;
	key->nsh.base.path_hdr = nh->path_hdr;
	switch (key->nsh.base.mdtype) {
	case NSH_M_TYPE1:
		if (length != NSH_M_TYPE1_LEN)
			return -EINVAL;
		memcpy(key->nsh.context, nh->md1.context,
		       sizeof(nh->md1));
		break;
	case NSH_M_TYPE2:
		memset(key->nsh.context, 0,
		       sizeof(nh->md1));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * key_extract_l3l4 - extracts L3/L4 header information.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       L3 header
 * @key: output flow key
 *
 * Return: %0 if successful, otherwise a negative errno value.
 */
static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		u8 label_count = 1;

		memset(&key->mpls, 0, sizeof(key->mpls));
		skb_set_inner_network_header(skb, skb->mac_len);
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len +
					     label_count * MPLS_HLEN);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);

			if (label_count <= MPLS_LABEL_DEPTH)
				memcpy(&key->mpls.lse[label_count - 1], &lse,
				       MPLS_HLEN);

			skb_set_inner_network_header(skb, skb->mac_len +
						     label_count * MPLS_HLEN);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			label_count++;
		}
		if (label_count > MPLS_LABEL_DEPTH)
			label_count = MPLS_LABEL_DEPTH;

		key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				fallthrough;
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
			memset(&key->tp, 0, sizeof(key->tp));
			return 0;
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_NSH)) {
		error = parse_nsh(skb, key);
		if (error)
			return error;
	}
	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 *       Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header fields as follows:
 *
 *    - skb->mac_header: the L2 header.
 *
 *    - skb->network_header: just past the L2 header, or just past the
 *      VLAN header, to the first byte of the L2 payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 *
 *    - skb->protocol: the type of the data starting at skb->network_header.
 *      Equal to key->eth.type.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. */
	clear_vlan(key);
	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
		if (unlikely(eth_type_vlan(skb->protocol)))
			return -EINVAL;

		skb_reset_network_header(skb);
		key->eth.type = skb->protocol;
	} else {
		eth = eth_hdr(skb);
		ether_addr_copy(key->eth.src, eth->h_source);
		ether_addr_copy(key->eth.dst, eth->h_dest);

		__skb_pull(skb, 2 * ETH_ALEN);
		/* We are going to push all headers that we pull, so no need to
		 * update skb->csum here.
		 */

		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

		key->eth.type = parse_ethertype(skb);
		if (unlikely(key->eth.type == htons(0)))
			return -ENOMEM;

		/* Multiple tagged packets need to retain TPID to satisfy
		 * skb_vlan_pop(), which will later shift the ethertype into
		 * skb->protocol.
		 */
		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
			skb->protocol = key->eth.cvlan.tpid;
		else
			skb->protocol = key->eth.type;

		skb_reset_network_header(skb);
		__skb_push(skb, skb->data - skb_mac_header(skb));
	}

	skb_reset_mac_len(skb);

	/* Fill out L3/L4 key info, if any */
	return key_extract_l3l4(skb, key);
}

/* Conntrack fragment handling operates on packets whose L3 headers have
 * already been parsed, so provide a helper that only refreshes the L3/L4
 * part of the flow key.
 */
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract_l3l4(skb, key);
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	res = key_extract(skb, key);
	if (!res)
		key->mac_proto &= ~SW_FLOW_KEY_INVALID;

	return res;
}

static int key_extract_mac_proto(struct sk_buff *skb)
{
	switch (skb->dev->type) {
	case ARPHRD_ETHER:
		return MAC_PROTO_ETHERNET;
	case ARPHRD_NONE:
		if (skb->protocol == htons(ETH_P_TEB))
			return MAC_PROTO_ETHERNET;
		return MAC_PROTO_NONE;
	}
	WARN_ON_ONCE(1);
	return -EINVAL;
}

int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *tc_ext;
#endif
	int res, err;

	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	key->ovs_flow_hash = 0;
	res = key_extract_mac_proto(skb);
	if (res < 0)
		return res;
	key->mac_proto = res;

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	if (static_branch_unlikely(&tc_recirc_sharing_support)) {
		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
		key->recirc_id = tc_ext ? tc_ext->chain : 0;
		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
	} else {
		key->recirc_id = 0;
	}
#else
	key->recirc_id = 0;
#endif

	err = key_extract(skb, key);
	if (!err)
		ovs_ct_fill_key(skb, key);   /* Must be after key_extract(). */
	return err;
}
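
/* Descriptive note (added): like ovs_flow_key_extract(), but for packets
 * injected from userspace: the metadata portion of @key is taken from the
 * netlink attributes in @attr rather than from the skb or a receiving vport.
 */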
int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;

	err = parse_flow_nlattrs(attr, a, &attrs, log);
	if (err)
		return -EINVAL;

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
	if (err)
		return err;

	/* key_extract assumes that skb->protocol is set up for
	 * layer 3 packets, which is the case for other callers,
	 * in particular packets received from the network stack.
	 * Here the correct value can be set from the metadata
	 * extracted above.
	 * For L2 packets the key eth type is zero; skb->protocol
	 * is set to the correct value later, during key_extract.
	 */

	skb->protocol = key->eth.type;
	err = key_extract(skb, key);
	if (err)
		return err;

	/* Check that we have conntrack original direction tuple metadata only
	 * for packets for which it makes sense.  Otherwise the key may be
	 * corrupted due to overlapping key fields.
	 */
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
	    key->eth.type != htons(ETH_P_IP))
		return -EINVAL;
	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
	    (key->eth.type != htons(ETH_P_IPV6) ||
	     sw_flow_key_is_nd(key)))
		return -EINVAL;

	return 0;
}