1 /* 2 * Copyright (c) 2007-2017 Nicira, Inc. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of version 2 of the GNU General Public 6 * License as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, but 9 * WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public License 14 * along with this program; if not, write to the Free Software 15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 16 * 02110-1301, USA 17 */ 18 19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 21 #include <linux/skbuff.h> 22 #include <linux/in.h> 23 #include <linux/ip.h> 24 #include <linux/openvswitch.h> 25 #include <linux/netfilter_ipv6.h> 26 #include <linux/sctp.h> 27 #include <linux/tcp.h> 28 #include <linux/udp.h> 29 #include <linux/in6.h> 30 #include <linux/if_arp.h> 31 #include <linux/if_vlan.h> 32 33 #include <net/dst.h> 34 #include <net/ip.h> 35 #include <net/ipv6.h> 36 #include <net/ip6_fib.h> 37 #include <net/checksum.h> 38 #include <net/dsfield.h> 39 #include <net/mpls.h> 40 #include <net/sctp/checksum.h> 41 42 #include "datapath.h" 43 #include "flow.h" 44 #include "conntrack.h" 45 #include "vport.h" 46 #include "flow_netlink.h" 47 48 struct deferred_action { 49 struct sk_buff *skb; 50 const struct nlattr *actions; 51 int actions_len; 52 53 /* Store pkt_key clone when creating deferred action. */ 54 struct sw_flow_key pkt_key; 55 }; 56 57 #define MAX_L2_LEN (VLAN_ETH_HLEN + 3 * MPLS_HLEN) 58 struct ovs_frag_data { 59 unsigned long dst; 60 struct vport *vport; 61 struct ovs_skb_cb cb; 62 __be16 inner_protocol; 63 u16 network_offset; /* valid only for MPLS */ 64 u16 vlan_tci; 65 __be16 vlan_proto; 66 unsigned int l2_len; 67 u8 mac_proto; 68 u8 l2_data[MAX_L2_LEN]; 69 }; 70 71 static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage); 72 73 #define DEFERRED_ACTION_FIFO_SIZE 10 74 #define OVS_RECURSION_LIMIT 5 75 #define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2) 76 struct action_fifo { 77 int head; 78 int tail; 79 /* Deferred action fifo queue storage. */ 80 struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE]; 81 }; 82 83 struct action_flow_keys { 84 struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD]; 85 }; 86 87 static struct action_fifo __percpu *action_fifos; 88 static struct action_flow_keys __percpu *flow_keys; 89 static DEFINE_PER_CPU(int, exec_actions_level); 90 91 /* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys' 92 * space. Return NULL if out of key spaces. 93 */ 94 static struct sw_flow_key *clone_key(const struct sw_flow_key *key_) 95 { 96 struct action_flow_keys *keys = this_cpu_ptr(flow_keys); 97 int level = this_cpu_read(exec_actions_level); 98 struct sw_flow_key *key = NULL; 99 100 if (level <= OVS_DEFERRED_ACTION_THRESHOLD) { 101 key = &keys->key[level - 1]; 102 *key = *key_; 103 } 104 105 return key; 106 } 107 108 static void action_fifo_init(struct action_fifo *fifo) 109 { 110 fifo->head = 0; 111 fifo->tail = 0; 112 } 113 114 static bool action_fifo_is_empty(const struct action_fifo *fifo) 115 { 116 return (fifo->head == fifo->tail); 117 } 118 119 static struct deferred_action *action_fifo_get(struct action_fifo *fifo) 120 { 121 if (action_fifo_is_empty(fifo)) 122 return NULL; 123 124 return &fifo->fifo[fifo->tail++]; 125 } 126 127 static struct deferred_action *action_fifo_put(struct action_fifo *fifo) 128 { 129 if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1) 130 return NULL; 131 132 return &fifo->fifo[fifo->head++]; 133 } 134 135 /* Return true if fifo is not full */ 136 static struct deferred_action *add_deferred_actions(struct sk_buff *skb, 137 const struct sw_flow_key *key, 138 const struct nlattr *actions, 139 const int actions_len) 140 { 141 struct action_fifo *fifo; 142 struct deferred_action *da; 143 144 fifo = this_cpu_ptr(action_fifos); 145 da = action_fifo_put(fifo); 146 if (da) { 147 da->skb = skb; 148 da->actions = actions; 149 da->actions_len = actions_len; 150 da->pkt_key = *key; 151 } 152 153 return da; 154 } 155 156 static void invalidate_flow_key(struct sw_flow_key *key) 157 { 158 key->mac_proto |= SW_FLOW_KEY_INVALID; 159 } 160 161 static bool is_flow_key_valid(const struct sw_flow_key *key) 162 { 163 return !(key->mac_proto & SW_FLOW_KEY_INVALID); 164 } 165 166 static int clone_execute(struct datapath *dp, struct sk_buff *skb, 167 struct sw_flow_key *key, 168 u32 recirc_id, 169 const struct nlattr *actions, int len, 170 bool last, bool clone_flow_key); 171 172 static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, 173 __be16 ethertype) 174 { 175 if (skb->ip_summed == CHECKSUM_COMPLETE) { 176 __be16 diff[] = { ~(hdr->h_proto), ethertype }; 177 178 skb->csum = ~csum_partial((char *)diff, sizeof(diff), 179 ~skb->csum); 180 } 181 182 hdr->h_proto = ethertype; 183 } 184 185 static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, 186 const struct ovs_action_push_mpls *mpls) 187 { 188 struct mpls_shim_hdr *new_mpls_lse; 189 190 /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ 191 if (skb->encapsulation) 192 return -ENOTSUPP; 193 194 if (skb_cow_head(skb, MPLS_HLEN) < 0) 195 return -ENOMEM; 196 197 if (!skb->inner_protocol) { 198 skb_set_inner_network_header(skb, skb->mac_len); 199 skb_set_inner_protocol(skb, skb->protocol); 200 } 201 202 skb_push(skb, MPLS_HLEN); 203 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), 204 skb->mac_len); 205 skb_reset_mac_header(skb); 206 skb_set_network_header(skb, skb->mac_len); 207 208 new_mpls_lse = mpls_hdr(skb); 209 new_mpls_lse->label_stack_entry = mpls->mpls_lse; 210 211 skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); 212 213 if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) 214 update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); 215 skb->protocol = mpls->mpls_ethertype; 216 217 invalidate_flow_key(key); 218 return 0; 219 } 220 221 static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, 222 const __be16 ethertype) 223 { 224 int err; 225 226 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 227 if (unlikely(err)) 228 return err; 229 230 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); 231 232 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), 233 skb->mac_len); 234 235 __skb_pull(skb, MPLS_HLEN); 236 skb_reset_mac_header(skb); 237 skb_set_network_header(skb, skb->mac_len); 238 239 if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET) { 240 struct ethhdr *hdr; 241 242 /* mpls_hdr() is used to locate the ethertype field correctly in the 243 * presence of VLAN tags. 244 */ 245 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); 246 update_ethertype(skb, hdr, ethertype); 247 } 248 if (eth_p_mpls(skb->protocol)) 249 skb->protocol = ethertype; 250 251 invalidate_flow_key(key); 252 return 0; 253 } 254 255 static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key, 256 const __be32 *mpls_lse, const __be32 *mask) 257 { 258 struct mpls_shim_hdr *stack; 259 __be32 lse; 260 int err; 261 262 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); 263 if (unlikely(err)) 264 return err; 265 266 stack = mpls_hdr(skb); 267 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask); 268 if (skb->ip_summed == CHECKSUM_COMPLETE) { 269 __be32 diff[] = { ~(stack->label_stack_entry), lse }; 270 271 skb->csum = ~csum_partial((char *)diff, sizeof(diff), 272 ~skb->csum); 273 } 274 275 stack->label_stack_entry = lse; 276 flow_key->mpls.top_lse = lse; 277 return 0; 278 } 279 280 static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) 281 { 282 int err; 283 284 err = skb_vlan_pop(skb); 285 if (skb_vlan_tag_present(skb)) { 286 invalidate_flow_key(key); 287 } else { 288 key->eth.vlan.tci = 0; 289 key->eth.vlan.tpid = 0; 290 } 291 return err; 292 } 293 294 static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, 295 const struct ovs_action_push_vlan *vlan) 296 { 297 if (skb_vlan_tag_present(skb)) { 298 invalidate_flow_key(key); 299 } else { 300 key->eth.vlan.tci = vlan->vlan_tci; 301 key->eth.vlan.tpid = vlan->vlan_tpid; 302 } 303 return skb_vlan_push(skb, vlan->vlan_tpid, 304 ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK); 305 } 306 307 /* 'src' is already properly masked. */ 308 static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_) 309 { 310 u16 *dst = (u16 *)dst_; 311 const u16 *src = (const u16 *)src_; 312 const u16 *mask = (const u16 *)mask_; 313 314 OVS_SET_MASKED(dst[0], src[0], mask[0]); 315 OVS_SET_MASKED(dst[1], src[1], mask[1]); 316 OVS_SET_MASKED(dst[2], src[2], mask[2]); 317 } 318 319 static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key, 320 const struct ovs_key_ethernet *key, 321 const struct ovs_key_ethernet *mask) 322 { 323 int err; 324 325 err = skb_ensure_writable(skb, ETH_HLEN); 326 if (unlikely(err)) 327 return err; 328 329 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); 330 331 ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src, 332 mask->eth_src); 333 ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst, 334 mask->eth_dst); 335 336 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); 337 338 ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source); 339 ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest); 340 return 0; 341 } 342 343 /* pop_eth does not support VLAN packets as this action is never called 344 * for them. 345 */ 346 static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key) 347 { 348 skb_pull_rcsum(skb, ETH_HLEN); 349 skb_reset_mac_header(skb); 350 skb_reset_mac_len(skb); 351 352 /* safe right before invalidate_flow_key */ 353 key->mac_proto = MAC_PROTO_NONE; 354 invalidate_flow_key(key); 355 return 0; 356 } 357 358 static int push_eth(struct sk_buff *skb, struct sw_flow_key *key, 359 const struct ovs_action_push_eth *ethh) 360 { 361 struct ethhdr *hdr; 362 363 /* Add the new Ethernet header */ 364 if (skb_cow_head(skb, ETH_HLEN) < 0) 365 return -ENOMEM; 366 367 skb_push(skb, ETH_HLEN); 368 skb_reset_mac_header(skb); 369 skb_reset_mac_len(skb); 370 371 hdr = eth_hdr(skb); 372 ether_addr_copy(hdr->h_source, ethh->addresses.eth_src); 373 ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst); 374 hdr->h_proto = skb->protocol; 375 376 skb_postpush_rcsum(skb, hdr, ETH_HLEN); 377 378 /* safe right before invalidate_flow_key */ 379 key->mac_proto = MAC_PROTO_ETHERNET; 380 invalidate_flow_key(key); 381 return 0; 382 } 383 384 static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key, 385 const struct nshhdr *nh) 386 { 387 int err; 388 389 err = nsh_push(skb, nh); 390 if (err) 391 return err; 392 393 /* safe right before invalidate_flow_key */ 394 key->mac_proto = MAC_PROTO_NONE; 395 invalidate_flow_key(key); 396 return 0; 397 } 398 399 static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key) 400 { 401 int err; 402 403 err = nsh_pop(skb); 404 if (err) 405 return err; 406 407 /* safe right before invalidate_flow_key */ 408 if (skb->protocol == htons(ETH_P_TEB)) 409 key->mac_proto = MAC_PROTO_ETHERNET; 410 else 411 key->mac_proto = MAC_PROTO_NONE; 412 invalidate_flow_key(key); 413 return 0; 414 } 415 416 static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh, 417 __be32 addr, __be32 new_addr) 418 { 419 int transport_len = skb->len - skb_transport_offset(skb); 420 421 if (nh->frag_off & htons(IP_OFFSET)) 422 return; 423 424 if (nh->protocol == IPPROTO_TCP) { 425 if (likely(transport_len >= sizeof(struct tcphdr))) 426 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 427 addr, new_addr, true); 428 } else if (nh->protocol == IPPROTO_UDP) { 429 if (likely(transport_len >= sizeof(struct udphdr))) { 430 struct udphdr *uh = udp_hdr(skb); 431 432 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 433 inet_proto_csum_replace4(&uh->check, skb, 434 addr, new_addr, true); 435 if (!uh->check) 436 uh->check = CSUM_MANGLED_0; 437 } 438 } 439 } 440 } 441 442 static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, 443 __be32 *addr, __be32 new_addr) 444 { 445 update_ip_l4_checksum(skb, nh, *addr, new_addr); 446 csum_replace4(&nh->check, *addr, new_addr); 447 skb_clear_hash(skb); 448 *addr = new_addr; 449 } 450 451 static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto, 452 __be32 addr[4], const __be32 new_addr[4]) 453 { 454 int transport_len = skb->len - skb_transport_offset(skb); 455 456 if (l4_proto == NEXTHDR_TCP) { 457 if (likely(transport_len >= sizeof(struct tcphdr))) 458 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, 459 addr, new_addr, true); 460 } else if (l4_proto == NEXTHDR_UDP) { 461 if (likely(transport_len >= sizeof(struct udphdr))) { 462 struct udphdr *uh = udp_hdr(skb); 463 464 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 465 inet_proto_csum_replace16(&uh->check, skb, 466 addr, new_addr, true); 467 if (!uh->check) 468 uh->check = CSUM_MANGLED_0; 469 } 470 } 471 } else if (l4_proto == NEXTHDR_ICMP) { 472 if (likely(transport_len >= sizeof(struct icmp6hdr))) 473 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum, 474 skb, addr, new_addr, true); 475 } 476 } 477 478 static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4], 479 const __be32 mask[4], __be32 masked[4]) 480 { 481 masked[0] = OVS_MASKED(old[0], addr[0], mask[0]); 482 masked[1] = OVS_MASKED(old[1], addr[1], mask[1]); 483 masked[2] = OVS_MASKED(old[2], addr[2], mask[2]); 484 masked[3] = OVS_MASKED(old[3], addr[3], mask[3]); 485 } 486 487 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, 488 __be32 addr[4], const __be32 new_addr[4], 489 bool recalculate_csum) 490 { 491 if (recalculate_csum) 492 update_ipv6_checksum(skb, l4_proto, addr, new_addr); 493 494 skb_clear_hash(skb); 495 memcpy(addr, new_addr, sizeof(__be32[4])); 496 } 497 498 static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask) 499 { 500 /* Bits 21-24 are always unmasked, so this retains their values. */ 501 OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16)); 502 OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8)); 503 OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask); 504 } 505 506 static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl, 507 u8 mask) 508 { 509 new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask); 510 511 csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8)); 512 nh->ttl = new_ttl; 513 } 514 515 static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key, 516 const struct ovs_key_ipv4 *key, 517 const struct ovs_key_ipv4 *mask) 518 { 519 struct iphdr *nh; 520 __be32 new_addr; 521 int err; 522 523 err = skb_ensure_writable(skb, skb_network_offset(skb) + 524 sizeof(struct iphdr)); 525 if (unlikely(err)) 526 return err; 527 528 nh = ip_hdr(skb); 529 530 /* Setting an IP addresses is typically only a side effect of 531 * matching on them in the current userspace implementation, so it 532 * makes sense to check if the value actually changed. 533 */ 534 if (mask->ipv4_src) { 535 new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src); 536 537 if (unlikely(new_addr != nh->saddr)) { 538 set_ip_addr(skb, nh, &nh->saddr, new_addr); 539 flow_key->ipv4.addr.src = new_addr; 540 } 541 } 542 if (mask->ipv4_dst) { 543 new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst); 544 545 if (unlikely(new_addr != nh->daddr)) { 546 set_ip_addr(skb, nh, &nh->daddr, new_addr); 547 flow_key->ipv4.addr.dst = new_addr; 548 } 549 } 550 if (mask->ipv4_tos) { 551 ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos); 552 flow_key->ip.tos = nh->tos; 553 } 554 if (mask->ipv4_ttl) { 555 set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl); 556 flow_key->ip.ttl = nh->ttl; 557 } 558 559 return 0; 560 } 561 562 static bool is_ipv6_mask_nonzero(const __be32 addr[4]) 563 { 564 return !!(addr[0] | addr[1] | addr[2] | addr[3]); 565 } 566 567 static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key, 568 const struct ovs_key_ipv6 *key, 569 const struct ovs_key_ipv6 *mask) 570 { 571 struct ipv6hdr *nh; 572 int err; 573 574 err = skb_ensure_writable(skb, skb_network_offset(skb) + 575 sizeof(struct ipv6hdr)); 576 if (unlikely(err)) 577 return err; 578 579 nh = ipv6_hdr(skb); 580 581 /* Setting an IP addresses is typically only a side effect of 582 * matching on them in the current userspace implementation, so it 583 * makes sense to check if the value actually changed. 584 */ 585 if (is_ipv6_mask_nonzero(mask->ipv6_src)) { 586 __be32 *saddr = (__be32 *)&nh->saddr; 587 __be32 masked[4]; 588 589 mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked); 590 591 if (unlikely(memcmp(saddr, masked, sizeof(masked)))) { 592 set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked, 593 true); 594 memcpy(&flow_key->ipv6.addr.src, masked, 595 sizeof(flow_key->ipv6.addr.src)); 596 } 597 } 598 if (is_ipv6_mask_nonzero(mask->ipv6_dst)) { 599 unsigned int offset = 0; 600 int flags = IP6_FH_F_SKIP_RH; 601 bool recalc_csum = true; 602 __be32 *daddr = (__be32 *)&nh->daddr; 603 __be32 masked[4]; 604 605 mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked); 606 607 if (unlikely(memcmp(daddr, masked, sizeof(masked)))) { 608 if (ipv6_ext_hdr(nh->nexthdr)) 609 recalc_csum = (ipv6_find_hdr(skb, &offset, 610 NEXTHDR_ROUTING, 611 NULL, &flags) 612 != NEXTHDR_ROUTING); 613 614 set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked, 615 recalc_csum); 616 memcpy(&flow_key->ipv6.addr.dst, masked, 617 sizeof(flow_key->ipv6.addr.dst)); 618 } 619 } 620 if (mask->ipv6_tclass) { 621 ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass); 622 flow_key->ip.tos = ipv6_get_dsfield(nh); 623 } 624 if (mask->ipv6_label) { 625 set_ipv6_fl(nh, ntohl(key->ipv6_label), 626 ntohl(mask->ipv6_label)); 627 flow_key->ipv6.label = 628 *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); 629 } 630 if (mask->ipv6_hlimit) { 631 OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit, 632 mask->ipv6_hlimit); 633 flow_key->ip.ttl = nh->hop_limit; 634 } 635 return 0; 636 } 637 638 static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key, 639 const struct nlattr *a) 640 { 641 struct nshhdr *nh; 642 size_t length; 643 int err; 644 u8 flags; 645 u8 ttl; 646 int i; 647 648 struct ovs_key_nsh key; 649 struct ovs_key_nsh mask; 650 651 err = nsh_key_from_nlattr(a, &key, &mask); 652 if (err) 653 return err; 654 655 /* Make sure the NSH base header is there */ 656 if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN)) 657 return -ENOMEM; 658 659 nh = nsh_hdr(skb); 660 length = nsh_hdr_len(nh); 661 662 /* Make sure the whole NSH header is there */ 663 err = skb_ensure_writable(skb, skb_network_offset(skb) + 664 length); 665 if (unlikely(err)) 666 return err; 667 668 nh = nsh_hdr(skb); 669 skb_postpull_rcsum(skb, nh, length); 670 flags = nsh_get_flags(nh); 671 flags = OVS_MASKED(flags, key.base.flags, mask.base.flags); 672 flow_key->nsh.base.flags = flags; 673 ttl = nsh_get_ttl(nh); 674 ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl); 675 flow_key->nsh.base.ttl = ttl; 676 nsh_set_flags_and_ttl(nh, flags, ttl); 677 nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr, 678 mask.base.path_hdr); 679 flow_key->nsh.base.path_hdr = nh->path_hdr; 680 switch (nh->mdtype) { 681 case NSH_M_TYPE1: 682 for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) { 683 nh->md1.context[i] = 684 OVS_MASKED(nh->md1.context[i], key.context[i], 685 mask.context[i]); 686 } 687 memcpy(flow_key->nsh.context, nh->md1.context, 688 sizeof(nh->md1.context)); 689 break; 690 case NSH_M_TYPE2: 691 memset(flow_key->nsh.context, 0, 692 sizeof(flow_key->nsh.context)); 693 break; 694 default: 695 return -EINVAL; 696 } 697 skb_postpush_rcsum(skb, nh, length); 698 return 0; 699 } 700 701 /* Must follow skb_ensure_writable() since that can move the skb data. */ 702 static void set_tp_port(struct sk_buff *skb, __be16 *port, 703 __be16 new_port, __sum16 *check) 704 { 705 inet_proto_csum_replace2(check, skb, *port, new_port, false); 706 *port = new_port; 707 } 708 709 static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, 710 const struct ovs_key_udp *key, 711 const struct ovs_key_udp *mask) 712 { 713 struct udphdr *uh; 714 __be16 src, dst; 715 int err; 716 717 err = skb_ensure_writable(skb, skb_transport_offset(skb) + 718 sizeof(struct udphdr)); 719 if (unlikely(err)) 720 return err; 721 722 uh = udp_hdr(skb); 723 /* Either of the masks is non-zero, so do not bother checking them. */ 724 src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src); 725 dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst); 726 727 if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) { 728 if (likely(src != uh->source)) { 729 set_tp_port(skb, &uh->source, src, &uh->check); 730 flow_key->tp.src = src; 731 } 732 if (likely(dst != uh->dest)) { 733 set_tp_port(skb, &uh->dest, dst, &uh->check); 734 flow_key->tp.dst = dst; 735 } 736 737 if (unlikely(!uh->check)) 738 uh->check = CSUM_MANGLED_0; 739 } else { 740 uh->source = src; 741 uh->dest = dst; 742 flow_key->tp.src = src; 743 flow_key->tp.dst = dst; 744 } 745 746 skb_clear_hash(skb); 747 748 return 0; 749 } 750 751 static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key, 752 const struct ovs_key_tcp *key, 753 const struct ovs_key_tcp *mask) 754 { 755 struct tcphdr *th; 756 __be16 src, dst; 757 int err; 758 759 err = skb_ensure_writable(skb, skb_transport_offset(skb) + 760 sizeof(struct tcphdr)); 761 if (unlikely(err)) 762 return err; 763 764 th = tcp_hdr(skb); 765 src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src); 766 if (likely(src != th->source)) { 767 set_tp_port(skb, &th->source, src, &th->check); 768 flow_key->tp.src = src; 769 } 770 dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst); 771 if (likely(dst != th->dest)) { 772 set_tp_port(skb, &th->dest, dst, &th->check); 773 flow_key->tp.dst = dst; 774 } 775 skb_clear_hash(skb); 776 777 return 0; 778 } 779 780 static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, 781 const struct ovs_key_sctp *key, 782 const struct ovs_key_sctp *mask) 783 { 784 unsigned int sctphoff = skb_transport_offset(skb); 785 struct sctphdr *sh; 786 __le32 old_correct_csum, new_csum, old_csum; 787 int err; 788 789 err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr)); 790 if (unlikely(err)) 791 return err; 792 793 sh = sctp_hdr(skb); 794 old_csum = sh->checksum; 795 old_correct_csum = sctp_compute_cksum(skb, sctphoff); 796 797 sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src); 798 sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst); 799 800 new_csum = sctp_compute_cksum(skb, sctphoff); 801 802 /* Carry any checksum errors through. */ 803 sh->checksum = old_csum ^ old_correct_csum ^ new_csum; 804 805 skb_clear_hash(skb); 806 flow_key->tp.src = sh->source; 807 flow_key->tp.dst = sh->dest; 808 809 return 0; 810 } 811 812 static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb) 813 { 814 struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage); 815 struct vport *vport = data->vport; 816 817 if (skb_cow_head(skb, data->l2_len) < 0) { 818 kfree_skb(skb); 819 return -ENOMEM; 820 } 821 822 __skb_dst_copy(skb, data->dst); 823 *OVS_CB(skb) = data->cb; 824 skb->inner_protocol = data->inner_protocol; 825 if (data->vlan_tci & VLAN_CFI_MASK) 826 __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK); 827 else 828 __vlan_hwaccel_clear_tag(skb); 829 830 /* Reconstruct the MAC header. */ 831 skb_push(skb, data->l2_len); 832 memcpy(skb->data, &data->l2_data, data->l2_len); 833 skb_postpush_rcsum(skb, skb->data, data->l2_len); 834 skb_reset_mac_header(skb); 835 836 if (eth_p_mpls(skb->protocol)) { 837 skb->inner_network_header = skb->network_header; 838 skb_set_network_header(skb, data->network_offset); 839 skb_reset_mac_len(skb); 840 } 841 842 ovs_vport_send(vport, skb, data->mac_proto); 843 return 0; 844 } 845 846 static unsigned int 847 ovs_dst_get_mtu(const struct dst_entry *dst) 848 { 849 return dst->dev->mtu; 850 } 851 852 static struct dst_ops ovs_dst_ops = { 853 .family = AF_UNSPEC, 854 .mtu = ovs_dst_get_mtu, 855 }; 856 857 /* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is 858 * ovs_vport_output(), which is called once per fragmented packet. 859 */ 860 static void prepare_frag(struct vport *vport, struct sk_buff *skb, 861 u16 orig_network_offset, u8 mac_proto) 862 { 863 unsigned int hlen = skb_network_offset(skb); 864 struct ovs_frag_data *data; 865 866 data = this_cpu_ptr(&ovs_frag_data_storage); 867 data->dst = skb->_skb_refdst; 868 data->vport = vport; 869 data->cb = *OVS_CB(skb); 870 data->inner_protocol = skb->inner_protocol; 871 data->network_offset = orig_network_offset; 872 if (skb_vlan_tag_present(skb)) 873 data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK; 874 else 875 data->vlan_tci = 0; 876 data->vlan_proto = skb->vlan_proto; 877 data->mac_proto = mac_proto; 878 data->l2_len = hlen; 879 memcpy(&data->l2_data, skb->data, hlen); 880 881 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 882 skb_pull(skb, hlen); 883 } 884 885 static void ovs_fragment(struct net *net, struct vport *vport, 886 struct sk_buff *skb, u16 mru, 887 struct sw_flow_key *key) 888 { 889 u16 orig_network_offset = 0; 890 891 if (eth_p_mpls(skb->protocol)) { 892 orig_network_offset = skb_network_offset(skb); 893 skb->network_header = skb->inner_network_header; 894 } 895 896 if (skb_network_offset(skb) > MAX_L2_LEN) { 897 OVS_NLERR(1, "L2 header too long to fragment"); 898 goto err; 899 } 900 901 if (key->eth.type == htons(ETH_P_IP)) { 902 struct dst_entry ovs_dst; 903 unsigned long orig_dst; 904 905 prepare_frag(vport, skb, orig_network_offset, 906 ovs_key_mac_proto(key)); 907 dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1, 908 DST_OBSOLETE_NONE, DST_NOCOUNT); 909 ovs_dst.dev = vport->dev; 910 911 orig_dst = skb->_skb_refdst; 912 skb_dst_set_noref(skb, &ovs_dst); 913 IPCB(skb)->frag_max_size = mru; 914 915 ip_do_fragment(net, skb->sk, skb, ovs_vport_output); 916 refdst_drop(orig_dst); 917 } else if (key->eth.type == htons(ETH_P_IPV6)) { 918 const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); 919 unsigned long orig_dst; 920 struct rt6_info ovs_rt; 921 922 if (!v6ops) 923 goto err; 924 925 prepare_frag(vport, skb, orig_network_offset, 926 ovs_key_mac_proto(key)); 927 memset(&ovs_rt, 0, sizeof(ovs_rt)); 928 dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1, 929 DST_OBSOLETE_NONE, DST_NOCOUNT); 930 ovs_rt.dst.dev = vport->dev; 931 932 orig_dst = skb->_skb_refdst; 933 skb_dst_set_noref(skb, &ovs_rt.dst); 934 IP6CB(skb)->frag_max_size = mru; 935 936 v6ops->fragment(net, skb->sk, skb, ovs_vport_output); 937 refdst_drop(orig_dst); 938 } else { 939 WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.", 940 ovs_vport_name(vport), ntohs(key->eth.type), mru, 941 vport->dev->mtu); 942 goto err; 943 } 944 945 return; 946 err: 947 kfree_skb(skb); 948 } 949 950 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, 951 struct sw_flow_key *key) 952 { 953 struct vport *vport = ovs_vport_rcu(dp, out_port); 954 955 if (likely(vport)) { 956 u16 mru = OVS_CB(skb)->mru; 957 u32 cutlen = OVS_CB(skb)->cutlen; 958 959 if (unlikely(cutlen > 0)) { 960 if (skb->len - cutlen > ovs_mac_header_len(key)) 961 pskb_trim(skb, skb->len - cutlen); 962 else 963 pskb_trim(skb, ovs_mac_header_len(key)); 964 } 965 966 if (likely(!mru || 967 (skb->len <= mru + vport->dev->hard_header_len))) { 968 ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); 969 } else if (mru <= vport->dev->mtu) { 970 struct net *net = read_pnet(&dp->net); 971 972 ovs_fragment(net, vport, skb, mru, key); 973 } else { 974 kfree_skb(skb); 975 } 976 } else { 977 kfree_skb(skb); 978 } 979 } 980 981 static int output_userspace(struct datapath *dp, struct sk_buff *skb, 982 struct sw_flow_key *key, const struct nlattr *attr, 983 const struct nlattr *actions, int actions_len, 984 uint32_t cutlen) 985 { 986 struct dp_upcall_info upcall; 987 const struct nlattr *a; 988 int rem; 989 990 memset(&upcall, 0, sizeof(upcall)); 991 upcall.cmd = OVS_PACKET_CMD_ACTION; 992 upcall.mru = OVS_CB(skb)->mru; 993 994 for (a = nla_data(attr), rem = nla_len(attr); rem > 0; 995 a = nla_next(a, &rem)) { 996 switch (nla_type(a)) { 997 case OVS_USERSPACE_ATTR_USERDATA: 998 upcall.userdata = a; 999 break; 1000 1001 case OVS_USERSPACE_ATTR_PID: 1002 upcall.portid = nla_get_u32(a); 1003 break; 1004 1005 case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: { 1006 /* Get out tunnel info. */ 1007 struct vport *vport; 1008 1009 vport = ovs_vport_rcu(dp, nla_get_u32(a)); 1010 if (vport) { 1011 int err; 1012 1013 err = dev_fill_metadata_dst(vport->dev, skb); 1014 if (!err) 1015 upcall.egress_tun_info = skb_tunnel_info(skb); 1016 } 1017 1018 break; 1019 } 1020 1021 case OVS_USERSPACE_ATTR_ACTIONS: { 1022 /* Include actions. */ 1023 upcall.actions = actions; 1024 upcall.actions_len = actions_len; 1025 break; 1026 } 1027 1028 } /* End of switch. */ 1029 } 1030 1031 return ovs_dp_upcall(dp, skb, key, &upcall, cutlen); 1032 } 1033 1034 /* When 'last' is true, sample() should always consume the 'skb'. 1035 * Otherwise, sample() should keep 'skb' intact regardless what 1036 * actions are executed within sample(). 1037 */ 1038 static int sample(struct datapath *dp, struct sk_buff *skb, 1039 struct sw_flow_key *key, const struct nlattr *attr, 1040 bool last) 1041 { 1042 struct nlattr *actions; 1043 struct nlattr *sample_arg; 1044 int rem = nla_len(attr); 1045 const struct sample_arg *arg; 1046 bool clone_flow_key; 1047 1048 /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */ 1049 sample_arg = nla_data(attr); 1050 arg = nla_data(sample_arg); 1051 actions = nla_next(sample_arg, &rem); 1052 1053 if ((arg->probability != U32_MAX) && 1054 (!arg->probability || prandom_u32() > arg->probability)) { 1055 if (last) 1056 consume_skb(skb); 1057 return 0; 1058 } 1059 1060 clone_flow_key = !arg->exec; 1061 return clone_execute(dp, skb, key, 0, actions, rem, last, 1062 clone_flow_key); 1063 } 1064 1065 /* When 'last' is true, clone() should always consume the 'skb'. 1066 * Otherwise, clone() should keep 'skb' intact regardless what 1067 * actions are executed within clone(). 1068 */ 1069 static int clone(struct datapath *dp, struct sk_buff *skb, 1070 struct sw_flow_key *key, const struct nlattr *attr, 1071 bool last) 1072 { 1073 struct nlattr *actions; 1074 struct nlattr *clone_arg; 1075 int rem = nla_len(attr); 1076 bool dont_clone_flow_key; 1077 1078 /* The first action is always 'OVS_CLONE_ATTR_ARG'. */ 1079 clone_arg = nla_data(attr); 1080 dont_clone_flow_key = nla_get_u32(clone_arg); 1081 actions = nla_next(clone_arg, &rem); 1082 1083 return clone_execute(dp, skb, key, 0, actions, rem, last, 1084 !dont_clone_flow_key); 1085 } 1086 1087 static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, 1088 const struct nlattr *attr) 1089 { 1090 struct ovs_action_hash *hash_act = nla_data(attr); 1091 u32 hash = 0; 1092 1093 /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */ 1094 hash = skb_get_hash(skb); 1095 hash = jhash_1word(hash, hash_act->hash_basis); 1096 if (!hash) 1097 hash = 0x1; 1098 1099 key->ovs_flow_hash = hash; 1100 } 1101 1102 static int execute_set_action(struct sk_buff *skb, 1103 struct sw_flow_key *flow_key, 1104 const struct nlattr *a) 1105 { 1106 /* Only tunnel set execution is supported without a mask. */ 1107 if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) { 1108 struct ovs_tunnel_info *tun = nla_data(a); 1109 1110 skb_dst_drop(skb); 1111 dst_hold((struct dst_entry *)tun->tun_dst); 1112 skb_dst_set(skb, (struct dst_entry *)tun->tun_dst); 1113 return 0; 1114 } 1115 1116 return -EINVAL; 1117 } 1118 1119 /* Mask is at the midpoint of the data. */ 1120 #define get_mask(a, type) ((const type)nla_data(a) + 1) 1121 1122 static int execute_masked_set_action(struct sk_buff *skb, 1123 struct sw_flow_key *flow_key, 1124 const struct nlattr *a) 1125 { 1126 int err = 0; 1127 1128 switch (nla_type(a)) { 1129 case OVS_KEY_ATTR_PRIORITY: 1130 OVS_SET_MASKED(skb->priority, nla_get_u32(a), 1131 *get_mask(a, u32 *)); 1132 flow_key->phy.priority = skb->priority; 1133 break; 1134 1135 case OVS_KEY_ATTR_SKB_MARK: 1136 OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *)); 1137 flow_key->phy.skb_mark = skb->mark; 1138 break; 1139 1140 case OVS_KEY_ATTR_TUNNEL_INFO: 1141 /* Masked data not supported for tunnel. */ 1142 err = -EINVAL; 1143 break; 1144 1145 case OVS_KEY_ATTR_ETHERNET: 1146 err = set_eth_addr(skb, flow_key, nla_data(a), 1147 get_mask(a, struct ovs_key_ethernet *)); 1148 break; 1149 1150 case OVS_KEY_ATTR_NSH: 1151 err = set_nsh(skb, flow_key, a); 1152 break; 1153 1154 case OVS_KEY_ATTR_IPV4: 1155 err = set_ipv4(skb, flow_key, nla_data(a), 1156 get_mask(a, struct ovs_key_ipv4 *)); 1157 break; 1158 1159 case OVS_KEY_ATTR_IPV6: 1160 err = set_ipv6(skb, flow_key, nla_data(a), 1161 get_mask(a, struct ovs_key_ipv6 *)); 1162 break; 1163 1164 case OVS_KEY_ATTR_TCP: 1165 err = set_tcp(skb, flow_key, nla_data(a), 1166 get_mask(a, struct ovs_key_tcp *)); 1167 break; 1168 1169 case OVS_KEY_ATTR_UDP: 1170 err = set_udp(skb, flow_key, nla_data(a), 1171 get_mask(a, struct ovs_key_udp *)); 1172 break; 1173 1174 case OVS_KEY_ATTR_SCTP: 1175 err = set_sctp(skb, flow_key, nla_data(a), 1176 get_mask(a, struct ovs_key_sctp *)); 1177 break; 1178 1179 case OVS_KEY_ATTR_MPLS: 1180 err = set_mpls(skb, flow_key, nla_data(a), get_mask(a, 1181 __be32 *)); 1182 break; 1183 1184 case OVS_KEY_ATTR_CT_STATE: 1185 case OVS_KEY_ATTR_CT_ZONE: 1186 case OVS_KEY_ATTR_CT_MARK: 1187 case OVS_KEY_ATTR_CT_LABELS: 1188 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4: 1189 case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6: 1190 err = -EINVAL; 1191 break; 1192 } 1193 1194 return err; 1195 } 1196 1197 static int execute_recirc(struct datapath *dp, struct sk_buff *skb, 1198 struct sw_flow_key *key, 1199 const struct nlattr *a, bool last) 1200 { 1201 u32 recirc_id; 1202 1203 if (!is_flow_key_valid(key)) { 1204 int err; 1205 1206 err = ovs_flow_key_update(skb, key); 1207 if (err) 1208 return err; 1209 } 1210 BUG_ON(!is_flow_key_valid(key)); 1211 1212 recirc_id = nla_get_u32(a); 1213 return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true); 1214 } 1215 1216 /* Execute a list of actions against 'skb'. */ 1217 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, 1218 struct sw_flow_key *key, 1219 const struct nlattr *attr, int len) 1220 { 1221 const struct nlattr *a; 1222 int rem; 1223 1224 for (a = attr, rem = len; rem > 0; 1225 a = nla_next(a, &rem)) { 1226 int err = 0; 1227 1228 switch (nla_type(a)) { 1229 case OVS_ACTION_ATTR_OUTPUT: { 1230 int port = nla_get_u32(a); 1231 struct sk_buff *clone; 1232 1233 /* Every output action needs a separate clone 1234 * of 'skb', In case the output action is the 1235 * last action, cloning can be avoided. 1236 */ 1237 if (nla_is_last(a, rem)) { 1238 do_output(dp, skb, port, key); 1239 /* 'skb' has been used for output. 1240 */ 1241 return 0; 1242 } 1243 1244 clone = skb_clone(skb, GFP_ATOMIC); 1245 if (clone) 1246 do_output(dp, clone, port, key); 1247 OVS_CB(skb)->cutlen = 0; 1248 break; 1249 } 1250 1251 case OVS_ACTION_ATTR_TRUNC: { 1252 struct ovs_action_trunc *trunc = nla_data(a); 1253 1254 if (skb->len > trunc->max_len) 1255 OVS_CB(skb)->cutlen = skb->len - trunc->max_len; 1256 break; 1257 } 1258 1259 case OVS_ACTION_ATTR_USERSPACE: 1260 output_userspace(dp, skb, key, a, attr, 1261 len, OVS_CB(skb)->cutlen); 1262 OVS_CB(skb)->cutlen = 0; 1263 break; 1264 1265 case OVS_ACTION_ATTR_HASH: 1266 execute_hash(skb, key, a); 1267 break; 1268 1269 case OVS_ACTION_ATTR_PUSH_MPLS: 1270 err = push_mpls(skb, key, nla_data(a)); 1271 break; 1272 1273 case OVS_ACTION_ATTR_POP_MPLS: 1274 err = pop_mpls(skb, key, nla_get_be16(a)); 1275 break; 1276 1277 case OVS_ACTION_ATTR_PUSH_VLAN: 1278 err = push_vlan(skb, key, nla_data(a)); 1279 break; 1280 1281 case OVS_ACTION_ATTR_POP_VLAN: 1282 err = pop_vlan(skb, key); 1283 break; 1284 1285 case OVS_ACTION_ATTR_RECIRC: { 1286 bool last = nla_is_last(a, rem); 1287 1288 err = execute_recirc(dp, skb, key, a, last); 1289 if (last) { 1290 /* If this is the last action, the skb has 1291 * been consumed or freed. 1292 * Return immediately. 1293 */ 1294 return err; 1295 } 1296 break; 1297 } 1298 1299 case OVS_ACTION_ATTR_SET: 1300 err = execute_set_action(skb, key, nla_data(a)); 1301 break; 1302 1303 case OVS_ACTION_ATTR_SET_MASKED: 1304 case OVS_ACTION_ATTR_SET_TO_MASKED: 1305 err = execute_masked_set_action(skb, key, nla_data(a)); 1306 break; 1307 1308 case OVS_ACTION_ATTR_SAMPLE: { 1309 bool last = nla_is_last(a, rem); 1310 1311 err = sample(dp, skb, key, a, last); 1312 if (last) 1313 return err; 1314 1315 break; 1316 } 1317 1318 case OVS_ACTION_ATTR_CT: 1319 if (!is_flow_key_valid(key)) { 1320 err = ovs_flow_key_update(skb, key); 1321 if (err) 1322 return err; 1323 } 1324 1325 err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key, 1326 nla_data(a)); 1327 1328 /* Hide stolen IP fragments from user space. */ 1329 if (err) 1330 return err == -EINPROGRESS ? 0 : err; 1331 break; 1332 1333 case OVS_ACTION_ATTR_CT_CLEAR: 1334 err = ovs_ct_clear(skb, key); 1335 break; 1336 1337 case OVS_ACTION_ATTR_PUSH_ETH: 1338 err = push_eth(skb, key, nla_data(a)); 1339 break; 1340 1341 case OVS_ACTION_ATTR_POP_ETH: 1342 err = pop_eth(skb, key); 1343 break; 1344 1345 case OVS_ACTION_ATTR_PUSH_NSH: { 1346 u8 buffer[NSH_HDR_MAX_LEN]; 1347 struct nshhdr *nh = (struct nshhdr *)buffer; 1348 1349 err = nsh_hdr_from_nlattr(nla_data(a), nh, 1350 NSH_HDR_MAX_LEN); 1351 if (unlikely(err)) 1352 break; 1353 err = push_nsh(skb, key, nh); 1354 break; 1355 } 1356 1357 case OVS_ACTION_ATTR_POP_NSH: 1358 err = pop_nsh(skb, key); 1359 break; 1360 1361 case OVS_ACTION_ATTR_METER: 1362 if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) { 1363 consume_skb(skb); 1364 return 0; 1365 } 1366 break; 1367 1368 case OVS_ACTION_ATTR_CLONE: { 1369 bool last = nla_is_last(a, rem); 1370 1371 err = clone(dp, skb, key, a, last); 1372 if (last) 1373 return err; 1374 1375 break; 1376 } 1377 } 1378 1379 if (unlikely(err)) { 1380 kfree_skb(skb); 1381 return err; 1382 } 1383 } 1384 1385 consume_skb(skb); 1386 return 0; 1387 } 1388 1389 /* Execute the actions on the clone of the packet. The effect of the 1390 * execution does not affect the original 'skb' nor the original 'key'. 1391 * 1392 * The execution may be deferred in case the actions can not be executed 1393 * immediately. 1394 */ 1395 static int clone_execute(struct datapath *dp, struct sk_buff *skb, 1396 struct sw_flow_key *key, u32 recirc_id, 1397 const struct nlattr *actions, int len, 1398 bool last, bool clone_flow_key) 1399 { 1400 struct deferred_action *da; 1401 struct sw_flow_key *clone; 1402 1403 skb = last ? skb : skb_clone(skb, GFP_ATOMIC); 1404 if (!skb) { 1405 /* Out of memory, skip this action. 1406 */ 1407 return 0; 1408 } 1409 1410 /* When clone_flow_key is false, the 'key' will not be change 1411 * by the actions, then the 'key' can be used directly. 1412 * Otherwise, try to clone key from the next recursion level of 1413 * 'flow_keys'. If clone is successful, execute the actions 1414 * without deferring. 1415 */ 1416 clone = clone_flow_key ? clone_key(key) : key; 1417 if (clone) { 1418 int err = 0; 1419 1420 if (actions) { /* Sample action */ 1421 if (clone_flow_key) 1422 __this_cpu_inc(exec_actions_level); 1423 1424 err = do_execute_actions(dp, skb, clone, 1425 actions, len); 1426 1427 if (clone_flow_key) 1428 __this_cpu_dec(exec_actions_level); 1429 } else { /* Recirc action */ 1430 clone->recirc_id = recirc_id; 1431 ovs_dp_process_packet(skb, clone); 1432 } 1433 return err; 1434 } 1435 1436 /* Out of 'flow_keys' space. Defer actions */ 1437 da = add_deferred_actions(skb, key, actions, len); 1438 if (da) { 1439 if (!actions) { /* Recirc action */ 1440 key = &da->pkt_key; 1441 key->recirc_id = recirc_id; 1442 } 1443 } else { 1444 /* Out of per CPU action FIFO space. Drop the 'skb' and 1445 * log an error. 1446 */ 1447 kfree_skb(skb); 1448 1449 if (net_ratelimit()) { 1450 if (actions) { /* Sample action */ 1451 pr_warn("%s: deferred action limit reached, drop sample action\n", 1452 ovs_dp_name(dp)); 1453 } else { /* Recirc action */ 1454 pr_warn("%s: deferred action limit reached, drop recirc action\n", 1455 ovs_dp_name(dp)); 1456 } 1457 } 1458 } 1459 return 0; 1460 } 1461 1462 static void process_deferred_actions(struct datapath *dp) 1463 { 1464 struct action_fifo *fifo = this_cpu_ptr(action_fifos); 1465 1466 /* Do not touch the FIFO in case there is no deferred actions. */ 1467 if (action_fifo_is_empty(fifo)) 1468 return; 1469 1470 /* Finishing executing all deferred actions. */ 1471 do { 1472 struct deferred_action *da = action_fifo_get(fifo); 1473 struct sk_buff *skb = da->skb; 1474 struct sw_flow_key *key = &da->pkt_key; 1475 const struct nlattr *actions = da->actions; 1476 int actions_len = da->actions_len; 1477 1478 if (actions) 1479 do_execute_actions(dp, skb, key, actions, actions_len); 1480 else 1481 ovs_dp_process_packet(skb, key); 1482 } while (!action_fifo_is_empty(fifo)); 1483 1484 /* Reset FIFO for the next packet. */ 1485 action_fifo_init(fifo); 1486 } 1487 1488 /* Execute a list of actions against 'skb'. */ 1489 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, 1490 const struct sw_flow_actions *acts, 1491 struct sw_flow_key *key) 1492 { 1493 int err, level; 1494 1495 level = __this_cpu_inc_return(exec_actions_level); 1496 if (unlikely(level > OVS_RECURSION_LIMIT)) { 1497 net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n", 1498 ovs_dp_name(dp)); 1499 kfree_skb(skb); 1500 err = -ENETDOWN; 1501 goto out; 1502 } 1503 1504 OVS_CB(skb)->acts_origlen = acts->orig_len; 1505 err = do_execute_actions(dp, skb, key, 1506 acts->actions, acts->actions_len); 1507 1508 if (level == 1) 1509 process_deferred_actions(dp); 1510 1511 out: 1512 __this_cpu_dec(exec_actions_level); 1513 return err; 1514 } 1515 1516 int action_fifos_init(void) 1517 { 1518 action_fifos = alloc_percpu(struct action_fifo); 1519 if (!action_fifos) 1520 return -ENOMEM; 1521 1522 flow_keys = alloc_percpu(struct action_flow_keys); 1523 if (!flow_keys) { 1524 free_percpu(action_fifos); 1525 return -ENOMEM; 1526 } 1527 1528 return 0; 1529 } 1530 1531 void action_fifos_exit(void) 1532 { 1533 free_percpu(action_fifos); 1534 free_percpu(flow_keys); 1535 } 1536