/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;

static int check_header(struct sk_buff *skb, int len)
{
        if (unlikely(skb->len < len))
                return -EINVAL;
        if (unlikely(!pskb_may_pull(skb, len)))
                return -ENOMEM;
        return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_network_offset(skb) +
                                  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int ip_len;
        int err;

        err = check_header(skb, nh_ofs + sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        ip_len = ip_hdrlen(skb);
        if (unlikely(ip_len < sizeof(struct iphdr) ||
                     skb->len < nh_ofs + ip_len))
                return -EINVAL;

        skb_set_transport_header(skb, nh_ofs + ip_len);
        return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
        int th_ofs = skb_transport_offset(skb);
        int tcp_len;

        if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
                return false;

        tcp_len = tcp_hdrlen(skb);
        if (unlikely(tcp_len < sizeof(struct tcphdr) ||
                     skb->len < th_ofs + tcp_len))
                return false;

        return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmphdr));
}

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
        struct timespec cur_ts;
        u64 cur_ms, idle_ms;

        ktime_get_ts(&cur_ts);
        idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
        cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
                 cur_ts.tv_nsec / NSEC_PER_MSEC;

        return cur_ms - idle_ms;
}

#define SW_FLOW_KEY_OFFSET(field)                       \
        (offsetof(struct sw_flow_key, field) +          \
         FIELD_SIZEOF(struct sw_flow_key, field))
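
/* For example, SW_FLOW_KEY_OFFSET(ipv4.addr) evaluates to the offset of
 * ipv4.addr within struct sw_flow_key plus its size.  The parsers below
 * use it to report how many leading bytes of the (zero-initialized) key
 * they actually filled in; that length is later used to hash and compare
 * keys.
 */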

static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
                         int *key_lenp)
{
        unsigned int nh_ofs = skb_network_offset(skb);
        unsigned int nh_len;
        int payload_ofs;
        struct ipv6hdr *nh;
        uint8_t nexthdr;
        __be16 frag_off;
        int err;

        *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

        err = check_header(skb, nh_ofs + sizeof(*nh));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        nexthdr = nh->nexthdr;
        payload_ofs = (u8 *)(nh + 1) - skb->data;

        key->ip.proto = NEXTHDR_NONE;
        key->ip.tos = ipv6_get_dsfield(nh);
        key->ip.ttl = nh->hop_limit;
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        key->ipv6.addr.src = nh->saddr;
        key->ipv6.addr.dst = nh->daddr;

        payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
        if (unlikely(payload_ofs < 0))
                return -EINVAL;

        if (frag_off) {
                if (frag_off & htons(~0x7))
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                else
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
        }

        nh_len = payload_ofs - nh_ofs;
        skb_set_transport_header(skb, nh_ofs + nh_len);
        key->ip.proto = nexthdr;
        return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
        return pskb_may_pull(skb, skb_transport_offset(skb) +
                                  sizeof(struct icmp6hdr));
}

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
        u8 tcp_flags = 0;

        if (flow->key.eth.type == htons(ETH_P_IP) &&
            flow->key.ip.proto == IPPROTO_TCP) {
                u8 *tcp = (u8 *)tcp_hdr(skb);
                tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
        }

        spin_lock(&flow->lock);
        flow->used = jiffies;
        flow->packet_count++;
        flow->byte_count += skb->len;
        flow->tcp_flags |= tcp_flags;
        spin_unlock(&flow->lock);
}
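
/* TCP_FLAGS_OFFSET and TCP_FLAG_MASK above pick the flag bits out of byte
 * 13 of the TCP header; the 0x3f mask keeps the six classic flags (URG,
 * ACK, PSH, RST, SYN, FIN) and drops the ECN bits (CWR, ECE).
 */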

struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
        int actions_len = nla_len(actions);
        struct sw_flow_actions *sfa;

        /* At least DP_MAX_PORTS actions are required to be able to flood a
         * packet to every port.  Factor of 2 allows for setting VLAN tags,
         * etc. */
        if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4))
                return ERR_PTR(-EINVAL);

        sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
        if (!sfa)
                return ERR_PTR(-ENOMEM);

        sfa->actions_len = actions_len;
        memcpy(sfa->actions, nla_data(actions), actions_len);
        return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;

        flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&flow->lock);
        flow->sf_acts = NULL;

        return flow;
}

static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
        hash = jhash_1word(hash, table->hash_seed);
        return flex_array_get(table->buckets,
                              (hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
        struct flex_array *buckets;
        int i, err;

        buckets = flex_array_alloc(sizeof(struct hlist_head *),
                                   n_buckets, GFP_KERNEL);
        if (!buckets)
                return NULL;

        err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
        if (err) {
                flex_array_free(buckets);
                return NULL;
        }

        for (i = 0; i < n_buckets; i++)
                INIT_HLIST_HEAD((struct hlist_head *)
                                        flex_array_get(buckets, i));

        return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
        flex_array_free(buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
        struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

        if (!table)
                return NULL;

        table->buckets = alloc_buckets(new_size);

        if (!table->buckets) {
                kfree(table);
                return NULL;
        }
        table->n_buckets = new_size;
        table->count = 0;
        table->node_ver = 0;
        table->keep_flows = false;
        get_random_bytes(&table->hash_seed, sizeof(u32));

        return table;
}

void ovs_flow_tbl_destroy(struct flow_table *table)
{
        int i;

        if (!table)
                return;

        if (table->keep_flows)
                goto skip_flows;

        for (i = 0; i < table->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = flex_array_get(table->buckets, i);
                struct hlist_node *node, *n;
                int ver = table->node_ver;

                hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
                        hlist_del_rcu(&flow->hash_node[ver]);
                        ovs_flow_free(flow);
                }
        }

skip_flows:
        free_buckets(table->buckets);
        kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct flow_table *table = container_of(rcu, struct flow_table, rcu);

        ovs_flow_tbl_destroy(table);
}

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
        if (!table)
                return;

        call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}

struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        struct hlist_node *n;
        int ver;
        int i;

        ver = table->node_ver;
        while (*bucket < table->n_buckets) {
                i = 0;
                head = flex_array_get(table->buckets, *bucket);
                hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}
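
/* Each sw_flow carries two hash list nodes so that it can be linked into
 * an old and a new table at the same time.  Rehashing below inserts every
 * flow into the new table under the opposite node_ver, then marks the old
 * table with keep_flows so that destroying it leaves the flows themselves
 * alive for RCU readers.
 */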

static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head;
                struct hlist_node *n;

                head = flex_array_get(old->buckets, i);

                hlist_for_each_entry(flow, n, head, hash_node[old_ver])
                        ovs_flow_tbl_insert(new, flow);
        }
        old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
        struct flow_table *new_table;

        new_table = ovs_flow_tbl_alloc(n_buckets);
        if (!new_table)
                return ERR_PTR(-ENOMEM);

        flow_table_copy_flows(table, new_table);

        return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
        return __flow_tbl_rehash(table, table->n_buckets * 2);
}

void ovs_flow_free(struct sw_flow *flow)
{
        if (unlikely(!flow))
                return;

        kfree((struct sw_flow_actions __force *)flow->sf_acts);
        kmem_cache_free(flow_cache, flow);
}

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        ovs_flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
        call_rcu(&flow->rcu, rcu_free_flow_callback);
}

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
        struct sw_flow_actions *sf_acts = container_of(rcu,
                        struct sw_flow_actions, rcu);
        kfree(sf_acts);
}
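
/* A typical actions update looks roughly as follows ('old_acts' and
 * 'new_acts' are illustrative names): publish the replacement with
 * rcu_assign_pointer(), then defer freeing the old actions so readers
 * still traversing them under rcu_read_lock() stay safe:
 *
 *      old_acts = flow->sf_acts;
 *      rcu_assign_pointer(flow->sf_acts, new_acts);
 *      ovs_flow_deferred_free_acts(old_acts);
 */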

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
        call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        struct qtag_prefix {
                __be16 eth_type; /* ETH_P_8021Q */
                __be16 tci;
        };
        struct qtag_prefix *qp;

        if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
                return 0;

        if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
                                         sizeof(__be16))))
                return -ENOMEM;

        qp = (struct qtag_prefix *) skb->data;
        key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
        __skb_pull(skb, sizeof(struct qtag_prefix));

        return 0;
}

static __be16 parse_ethertype(struct sk_buff *skb)
{
        struct llc_snap_hdr {
                u8  dsap;  /* Always 0xAA */
                u8  ssap;  /* Always 0xAA */
                u8  ctrl;
                u8  oui[3];
                __be16 ethertype;
        };
        struct llc_snap_hdr *llc;
        __be16 proto;

        proto = *(__be16 *) skb->data;
        __skb_pull(skb, sizeof(__be16));

        if (ntohs(proto) >= 1536)
                return proto;

        if (skb->len < sizeof(struct llc_snap_hdr))
                return htons(ETH_P_802_2);

        if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
                return htons(0);

        llc = (struct llc_snap_hdr *) skb->data;
        if (llc->dsap != LLC_SAP_SNAP ||
            llc->ssap != LLC_SAP_SNAP ||
            (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
                return htons(ETH_P_802_2);

        __skb_pull(skb, sizeof(struct llc_snap_hdr));
        return llc->ethertype;
}
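
/* Values of proto >= 1536 (0x600) are Ethernet II EtherTypes; smaller
 * values are 802.3 frame lengths, in which case the payload may begin
 * with an 802.2 LLC/SNAP header.  A SNAP header with a zero OUI carries
 * the real EtherType; anything else is classified as plain ETH_P_802_2
 * above.
 */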

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
                        int *key_lenp, int nh_len)
{
        struct icmp6hdr *icmp = icmp6_hdr(skb);
        int error = 0;
        int key_len;

        /* The ICMPv6 type and code fields use the 16-bit transport port
         * fields, so we need to store them in 16-bit network byte order.
         */
        key->ipv6.tp.src = htons(icmp->icmp6_type);
        key->ipv6.tp.dst = htons(icmp->icmp6_code);
        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

        if (icmp->icmp6_code == 0 &&
            (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
             icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                int icmp_len = skb->len - skb_transport_offset(skb);
                struct nd_msg *nd;
                int offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                /* In order to process neighbor discovery options, we need the
                 * entire packet.
                 */
                if (unlikely(icmp_len < sizeof(*nd)))
                        goto out;
                if (unlikely(skb_linearize(skb))) {
                        error = -ENOMEM;
                        goto out;
                }

                nd = (struct nd_msg *)skb_transport_header(skb);
                key->ipv6.nd.target = nd->target;
                key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

                icmp_len -= sizeof(*nd);
                offset = 0;
                while (icmp_len >= 8) {
                        struct nd_opt_hdr *nd_opt =
                                 (struct nd_opt_hdr *)(nd->opt + offset);
                        int opt_len = nd_opt->nd_opt_len * 8;

                        if (unlikely(!opt_len || opt_len > icmp_len))
                                goto invalid;

                        /* Store the link layer address if the appropriate
                         * option is provided.  It is considered an error if
                         * the same link layer option is specified twice.
                         */
                        if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
                            && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.sll,
                                       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
                                   && opt_len == 8) {
                                if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
                                        goto invalid;
                                memcpy(key->ipv6.nd.tll,
                                       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
                        }

                        icmp_len -= opt_len;
                        offset += opt_len;
                }
        }

        goto out;

invalid:
        memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
        memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
        memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
        *key_lenp = key_len;
        return error;
}
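
/* Note that a malformed ND option list is not a hard failure: the
 * "invalid" path above clears the ND fields and still returns 0, so the
 * packet is keyed as ordinary ICMPv6 rather than dropped.
 */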

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
                     int *key_lenp)
{
        int error = 0;
        int key_len = SW_FLOW_KEY_OFFSET(eth);
        struct ethhdr *eth;

        memset(key, 0, sizeof(*key));

        key->phy.priority = skb->priority;
        key->phy.in_port = in_port;

        skb_reset_mac_header(skb);

        /* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
         * header in the linear data area.
         */
        eth = eth_hdr(skb);
        memcpy(key->eth.src, eth->h_source, ETH_ALEN);
        memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

        __skb_pull(skb, 2 * ETH_ALEN);

        if (vlan_tx_tag_present(skb))
                key->eth.tci = htons(skb->vlan_tci);
        else if (eth->h_proto == htons(ETH_P_8021Q))
                if (unlikely(parse_vlan(skb, key)))
                        return -ENOMEM;

        key->eth.type = parse_ethertype(skb);
        if (unlikely(key->eth.type == htons(0)))
                return -ENOMEM;

        skb_reset_network_header(skb);
        __skb_push(skb, skb->data - skb_mac_header(skb));

        /* Network layer. */
        if (key->eth.type == htons(ETH_P_IP)) {
                struct iphdr *nh;
                __be16 offset;

                key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

                error = check_iphdr(skb);
                if (unlikely(error)) {
                        if (error == -EINVAL) {
                                skb->transport_header = skb->network_header;
                                error = 0;
                        }
                        goto out;
                }

                nh = ip_hdr(skb);
                key->ipv4.addr.src = nh->saddr;
                key->ipv4.addr.dst = nh->daddr;

                key->ip.proto = nh->protocol;
                key->ip.tos = nh->tos;
                key->ip.ttl = nh->ttl;

                offset = nh->frag_off & htons(IP_OFFSET);
                if (offset) {
                        key->ip.frag = OVS_FRAG_TYPE_LATER;
                        goto out;
                }
                if (nh->frag_off & htons(IP_MF) ||
                         skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;

                /* Transport layer. */
                if (key->ip.proto == IPPROTO_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv4.tp.src = tcp->source;
                                key->ipv4.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv4.tp.src = udp->source;
                                key->ipv4.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == IPPROTO_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                        if (icmphdr_ok(skb)) {
                                struct icmphdr *icmp = icmp_hdr(skb);
                                /* The ICMP type and code fields use the 16-bit
                                 * transport port fields, so we need to store
                                 * them in 16-bit network byte order. */
                                key->ipv4.tp.src = htons(icmp->type);
                                key->ipv4.tp.dst = htons(icmp->code);
                        }
                }

        } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
                struct arp_eth_header *arp;

                arp = (struct arp_eth_header *)skb_network_header(skb);

                if (arp->ar_hrd == htons(ARPHRD_ETHER)
                    && arp->ar_pro == htons(ETH_P_IP)
                    && arp->ar_hln == ETH_ALEN
                    && arp->ar_pln == 4) {

                        /* We only match on the lower 8 bits of the opcode. */
                        if (ntohs(arp->ar_op) <= 0xff)
                                key->ip.proto = ntohs(arp->ar_op);

                        if (key->ip.proto == ARPOP_REQUEST
                            || key->ip.proto == ARPOP_REPLY) {
                                memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
                                memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
                                memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
                                memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
                                key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                        }
                }
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                int nh_len;             /* IPv6 Header + Extensions */

                nh_len = parse_ipv6hdr(skb, key, &key_len);
                if (unlikely(nh_len < 0)) {
                        if (nh_len == -EINVAL)
                                skb->transport_header = skb->network_header;
                        else
                                error = nh_len;
                        goto out;
                }

                if (key->ip.frag == OVS_FRAG_TYPE_LATER)
                        goto out;
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
                        key->ip.frag = OVS_FRAG_TYPE_FIRST;
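
                /* Transport fields are left as zero (from the initial
                 * memset) whenever the corresponding header is truncated,
                 * so a short packet still yields a usable flow key.
                 */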
                /* Transport layer. */
                if (key->ip.proto == NEXTHDR_TCP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (tcphdr_ok(skb)) {
                                struct tcphdr *tcp = tcp_hdr(skb);
                                key->ipv6.tp.src = tcp->source;
                                key->ipv6.tp.dst = tcp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_UDP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (udphdr_ok(skb)) {
                                struct udphdr *udp = udp_hdr(skb);
                                key->ipv6.tp.src = udp->source;
                                key->ipv6.tp.dst = udp->dest;
                        }
                } else if (key->ip.proto == NEXTHDR_ICMP) {
                        key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                        if (icmp6hdr_ok(skb)) {
                                error = parse_icmpv6(skb, key, &key_len, nh_len);
                                if (error < 0)
                                        goto out;
                        }
                }
        }

out:
        *key_lenp = key_len;
        return error;
}

u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
        return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
                                    struct sw_flow_key *key, int key_len)
{
        struct sw_flow *flow;
        struct hlist_node *n;
        struct hlist_head *head;
        u32 hash;

        hash = ovs_flow_hash(key, key_len);

        head = find_bucket(table, hash);
        hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {

                if (flow->hash == hash &&
                    !memcmp(&flow->key, key, key_len)) {
                        return flow;
                }
        }
        return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(table, flow->hash);
        hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
        table->count++;
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        hlist_del_rcu(&flow->hash_node[table->node_ver]);
        table->count--;
        BUG_ON(table->count < 0);
}
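
/* ovs_flow_hash() above hashes the first key_len bytes of the key (rounded
 * up to whole 32-bit words for jhash2()), and ovs_flow_tbl_lookup()
 * compares the same span with memcmp().  This is only sound because every
 * key is zero-initialized before parsing, so any bytes beyond the fields
 * actually set are deterministic.
 */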

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_ENCAP] = -1,
        [OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
        [OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
        [OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
        [OVS_KEY_ATTR_VLAN] = sizeof(__be16),
        [OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
        [OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
        [OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
        [OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
        [OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
        [OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
        [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
        [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
        [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};

static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
                                  const struct nlattr *a[], u32 *attrs)
{
        const struct ovs_key_icmp *icmp_key;
        const struct ovs_key_tcp *tcp_key;
        const struct ovs_key_udp *udp_key;

        switch (swkey->ip.proto) {
        case IPPROTO_TCP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TCP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
                swkey->ipv4.tp.src = tcp_key->tcp_src;
                swkey->ipv4.tp.dst = tcp_key->tcp_dst;
                break;

        case IPPROTO_UDP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_UDP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
                swkey->ipv4.tp.src = udp_key->udp_src;
                swkey->ipv4.tp.dst = udp_key->udp_dst;
                break;

        case IPPROTO_ICMP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
                icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
                swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
                swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
                break;
        }

        return 0;
}
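
/* ipv4_flow_from_nlattrs() above and ipv6_flow_from_nlattrs() below clear
 * each attribute's bit in *attrs as they consume it; ovs_flow_from_nlattrs()
 * rejects the key if any bits remain set afterwards, so an attribute that
 * does not match the declared protocol (e.g. OVS_KEY_ATTR_TCP with
 * ip.proto == IPPROTO_UDP) is an error.
 */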

static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
                                  const struct nlattr *a[], u32 *attrs)
{
        const struct ovs_key_icmpv6 *icmpv6_key;
        const struct ovs_key_tcp *tcp_key;
        const struct ovs_key_udp *udp_key;

        switch (swkey->ip.proto) {
        case IPPROTO_TCP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TCP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
                swkey->ipv6.tp.src = tcp_key->tcp_src;
                swkey->ipv6.tp.dst = tcp_key->tcp_dst;
                break;

        case IPPROTO_UDP:
                if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_UDP);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
                swkey->ipv6.tp.src = udp_key->udp_src;
                swkey->ipv6.tp.dst = udp_key->udp_dst;
                break;

        case IPPROTO_ICMPV6:
                if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

                *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
                icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
                swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
                swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

                if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                        const struct ovs_key_nd *nd_key;

                        if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
                                return -EINVAL;
                        *attrs &= ~(1 << OVS_KEY_ATTR_ND);

                        *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
                        nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
                        memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
                               sizeof(swkey->ipv6.nd.target));
                        memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
                        memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
                }
                break;
        }

        return 0;
}

static int parse_flow_nlattrs(const struct nlattr *attr,
                              const struct nlattr *a[], u32 *attrsp)
{
        const struct nlattr *nla;
        u32 attrs;
        int rem;

        attrs = 0;
        nla_for_each_nested(nla, attr, rem) {
                u16 type = nla_type(nla);
                int expected_len;

                if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
                        return -EINVAL;

                expected_len = ovs_key_lens[type];
                if (nla_len(nla) != expected_len && expected_len != -1)
                        return -EINVAL;

                attrs |= 1 << type;
                a[type] = nla;
        }
        if (rem)
                return -EINVAL;

        *attrsp = attrs;
        return 0;
}
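
/* For example, the smallest key that parse_flow_nlattrs() and
 * ovs_flow_from_nlattrs() accept is a single OVS_KEY_ATTR_ETHERNET
 * attribute; the metadata attributes are optional, and a missing
 * OVS_KEY_ATTR_ETHERTYPE defaults the key to ETH_P_802_2.
 */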

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
                          const struct nlattr *attr)
{
        const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
        const struct ovs_key_ethernet *eth_key;
        int key_len;
        u32 attrs;
        int err;

        memset(swkey, 0, sizeof(struct sw_flow_key));
        key_len = SW_FLOW_KEY_OFFSET(eth);

        err = parse_flow_nlattrs(attr, a, &attrs);
        if (err)
                return err;

        /* Metadata attributes. */
        if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
                swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
                attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
        }
        if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
                u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
                if (in_port >= DP_MAX_PORTS)
                        return -EINVAL;
                swkey->phy.in_port = in_port;
                attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
        } else {
                swkey->phy.in_port = USHRT_MAX;
        }

        /* Data attributes. */
        if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
                return -EINVAL;
        attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

        eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
        memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
        memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

        if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
            nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
                const struct nlattr *encap;
                __be16 tci;

                if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
                              (1 << OVS_KEY_ATTR_ETHERTYPE) |
                              (1 << OVS_KEY_ATTR_ENCAP)))
                        return -EINVAL;

                encap = a[OVS_KEY_ATTR_ENCAP];
                tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
                if (tci & htons(VLAN_TAG_PRESENT)) {
                        swkey->eth.tci = tci;

                        err = parse_flow_nlattrs(encap, a, &attrs);
                        if (err)
                                return err;
                } else if (!tci) {
                        /* Corner case for truncated 802.1Q header. */
                        if (nla_len(encap))
                                return -EINVAL;

                        swkey->eth.type = htons(ETH_P_8021Q);
                        *key_lenp = key_len;
                        return 0;
                } else {
                        return -EINVAL;
                }
        }

        if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
                swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
                if (ntohs(swkey->eth.type) < 1536)
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
        } else {
                swkey->eth.type = htons(ETH_P_802_2);
        }

        if (swkey->eth.type == htons(ETH_P_IP)) {
                const struct ovs_key_ipv4 *ipv4_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

                key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
                ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
                if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
                        return -EINVAL;
                swkey->ip.proto = ipv4_key->ipv4_proto;
                swkey->ip.tos = ipv4_key->ipv4_tos;
                swkey->ip.ttl = ipv4_key->ipv4_ttl;
                swkey->ip.frag = ipv4_key->ipv4_frag;
                swkey->ipv4.addr.src = ipv4_key->ipv4_src;
                swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

                if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
                        err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
                        if (err)
                                return err;
                }
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                const struct ovs_key_ipv6 *ipv6_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

                key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
                ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
                if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
                        return -EINVAL;
                swkey->ipv6.label = ipv6_key->ipv6_label;
                swkey->ip.proto = ipv6_key->ipv6_proto;
                swkey->ip.tos = ipv6_key->ipv6_tclass;
                swkey->ip.ttl = ipv6_key->ipv6_hlimit;
                swkey->ip.frag = ipv6_key->ipv6_frag;
                memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
                       sizeof(swkey->ipv6.addr.src));
                memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
                       sizeof(swkey->ipv6.addr.dst));

                if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
                        err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
                        if (err)
                                return err;
                }
        } else if (swkey->eth.type == htons(ETH_P_ARP)) {
                const struct ovs_key_arp *arp_key;

                if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
                        return -EINVAL;
                attrs &= ~(1 << OVS_KEY_ATTR_ARP);

                key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
                arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
                swkey->ipv4.addr.src = arp_key->arp_sip;
                swkey->ipv4.addr.dst = arp_key->arp_tip;
                if (arp_key->arp_op & htons(0xff00))
                        return -EINVAL;
                swkey->ip.proto = ntohs(arp_key->arp_op);
                memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
                memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
        }

        if (attrs)
                return -EINVAL;
        *key_lenp = key_len;

        return 0;
}
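
/* As in parse_ethertype(), EtherType values below 1536 are not valid
 * Ethernet II types and are rejected above; 802.2 frames are expressed by
 * omitting OVS_KEY_ATTR_ETHERTYPE altogether, which defaults the key to
 * ETH_P_802_2.
 */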

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into flow metadata.
 * @priority: receives the extracted skb priority.
 * @in_port: receives the extracted input port.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
 * it to get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
                                   const struct nlattr *attr)
{
        const struct nlattr *nla;
        int rem;

        *in_port = USHRT_MAX;
        *priority = 0;

        nla_for_each_nested(nla, attr, rem) {
                int type = nla_type(nla);

                if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
                        if (nla_len(nla) != ovs_key_lens[type])
                                return -EINVAL;

                        switch (type) {
                        case OVS_KEY_ATTR_PRIORITY:
                                *priority = nla_get_u32(nla);
                                break;

                        case OVS_KEY_ATTR_IN_PORT:
                                if (nla_get_u32(nla) >= DP_MAX_PORTS)
                                        return -EINVAL;
                                *in_port = nla_get_u32(nla);
                                break;
                        }
                }
        }
        if (rem)
                return -EINVAL;
        return 0;
}

int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
        struct ovs_key_ethernet *eth_key;
        struct nlattr *nla, *encap;

        if (swkey->phy.priority)
                NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority);

        if (swkey->phy.in_port != USHRT_MAX)
                NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port);

        nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
        if (!nla)
                goto nla_put_failure;
        eth_key = nla_data(nla);
        memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
        memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

        if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
                NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q));
                NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci);
                encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
                if (!swkey->eth.tci)
                        goto unencap;
        } else {
                encap = NULL;
        }

        if (swkey->eth.type == htons(ETH_P_802_2))
                goto unencap;

        NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type);

        if (swkey->eth.type == htons(ETH_P_IP)) {
                struct ovs_key_ipv4 *ipv4_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
                if (!nla)
                        goto nla_put_failure;
                ipv4_key = nla_data(nla);
                ipv4_key->ipv4_src = swkey->ipv4.addr.src;
                ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
                ipv4_key->ipv4_proto = swkey->ip.proto;
                ipv4_key->ipv4_tos = swkey->ip.tos;
                ipv4_key->ipv4_ttl = swkey->ip.ttl;
                ipv4_key->ipv4_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                struct ovs_key_ipv6 *ipv6_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
                if (!nla)
                        goto nla_put_failure;
                ipv6_key = nla_data(nla);
                memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
                       sizeof(ipv6_key->ipv6_src));
                memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
                       sizeof(ipv6_key->ipv6_dst));
                ipv6_key->ipv6_label = swkey->ipv6.label;
                ipv6_key->ipv6_proto = swkey->ip.proto;
                ipv6_key->ipv6_tclass = swkey->ip.tos;
                ipv6_key->ipv6_hlimit = swkey->ip.ttl;
                ipv6_key->ipv6_frag = swkey->ip.frag;
        } else if (swkey->eth.type == htons(ETH_P_ARP)) {
                struct ovs_key_arp *arp_key;

                nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
                if (!nla)
                        goto nla_put_failure;
                arp_key = nla_data(nla);
                memset(arp_key, 0, sizeof(struct ovs_key_arp));
                arp_key->arp_sip = swkey->ipv4.addr.src;
                arp_key->arp_tip = swkey->ipv4.addr.dst;
                arp_key->arp_op = htons(swkey->ip.proto);
                memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
                memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
        }
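
        /* Transport-layer attributes are serialized only for unfragmented
         * packets and first fragments, mirroring ovs_flow_extract(), which
         * never fills in the port fields for OVS_FRAG_TYPE_LATER.
         */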
        if ((swkey->eth.type == htons(ETH_P_IP) ||
             swkey->eth.type == htons(ETH_P_IPV6)) &&
             swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

                if (swkey->ip.proto == IPPROTO_TCP) {
                        struct ovs_key_tcp *tcp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
                        if (!nla)
                                goto nla_put_failure;
                        tcp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                tcp_key->tcp_src = swkey->ipv4.tp.src;
                                tcp_key->tcp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                tcp_key->tcp_src = swkey->ipv6.tp.src;
                                tcp_key->tcp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->ip.proto == IPPROTO_UDP) {
                        struct ovs_key_udp *udp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
                        if (!nla)
                                goto nla_put_failure;
                        udp_key = nla_data(nla);
                        if (swkey->eth.type == htons(ETH_P_IP)) {
                                udp_key->udp_src = swkey->ipv4.tp.src;
                                udp_key->udp_dst = swkey->ipv4.tp.dst;
                        } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
                                udp_key->udp_src = swkey->ipv6.tp.src;
                                udp_key->udp_dst = swkey->ipv6.tp.dst;
                        }
                } else if (swkey->eth.type == htons(ETH_P_IP) &&
                           swkey->ip.proto == IPPROTO_ICMP) {
                        struct ovs_key_icmp *icmp_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmp_key = nla_data(nla);
                        icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
                        icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
                } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
                           swkey->ip.proto == IPPROTO_ICMPV6) {
                        struct ovs_key_icmpv6 *icmpv6_key;

                        nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
                                                sizeof(*icmpv6_key));
                        if (!nla)
                                goto nla_put_failure;
                        icmpv6_key = nla_data(nla);
                        icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
                        icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

                        if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
                            icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
                                struct ovs_key_nd *nd_key;

                                nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
                                if (!nla)
                                        goto nla_put_failure;
                                nd_key = nla_data(nla);
                                memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
                                        sizeof(nd_key->nd_target));
                                memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
                                memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
                        }
                }
        }

unencap:
        if (encap)
                nla_nest_end(skb, encap);

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
                                        0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_cache);
}
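
/* Putting it together, the receive fast path uses this module roughly as
 * follows ('table' and 'in_port' are owned by the datapath code in
 * datapath.c):
 *
 *      struct sw_flow_key key;
 *      int key_len;
 *
 *      error = ovs_flow_extract(skb, in_port, &key, &key_len);
 *      if (!error) {
 *              struct sw_flow *flow;
 *
 *              flow = ovs_flow_tbl_lookup(table, &key, key_len);
 *              if (flow)
 *                      ovs_flow_used(flow, skb);
 *      }
 *
 * A lookup miss results in an upcall to userspace; the flow installed in
 * response comes back through ovs_flow_from_nlattrs() and
 * ovs_flow_tbl_insert().
 */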