xref: /openbmc/linux/net/openvswitch/actions.c (revision 23c2b932)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
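
/* Note on the fifo discipline: entries are appended at 'head' by
 * action_fifo_put() and drained from 'tail' by action_fifo_get().  The
 * indices only move forward; rather than wrapping, the fifo is reset via
 * action_fifo_init() once process_deferred_actions() has drained it, so at
 * most DEFERRED_ACTION_FIFO_SIZE - 1 actions can be deferred at a time.
 */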

/* Return the queued deferred action, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}

static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~(hdr->h_proto), ethertype };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					~skb->csum);
	}

	hdr->h_proto = ethertype;
}
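
/* The CHECKSUM_COMPLETE fixup in update_ethertype() is the usual trick for
 * swapping a 16-bit word inside a complemented checksum: folding the pair
 * { ~old, new } into ~skb->csum cancels the old h_proto's contribution and
 * adds the new one, and the result is then complemented back.
 */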

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	update_ethertype(skb, hdr, ethertype);
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
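
/* OVS_SET_MASKED(old, new, mask) keeps the bits of 'old' that are clear in
 * 'mask' and takes the remaining bits from 'new'; it is roughly
 * (new & mask) | (old & ~mask), except that callers such as the one above
 * guarantee 'new' is already masked, letting the macro skip 'new & mask'.
 */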

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
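
/* On IPv4, a UDP checksum of zero means "no checksum", which is why the UDP
 * branch above leaves a zero, non-offloaded checksum alone, and why a
 * checksum that recomputes to zero is transmitted as CSUM_MANGLED_0 (0xffff)
 * instead.
 */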

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
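
/* set_ip_ttl() relies on 'ttl' sharing a 16-bit word of the IPv4 header
 * with 'protocol': csum_replace2() is given the old and new TTL in the high
 * byte with a zero low byte, and since the protocol byte is unchanged it
 * cancels out of the checksum difference.
 */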

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting IP addresses is typically only a side effect of matching
	 * on them in the current userspace implementation, so it makes
	 * sense to check whether the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting IP addresses is typically only a side effect of matching
	 * on them in the current userspace implementation, so it makes
	 * sense to check whether the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
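
/* Unlike the Internet checksum used by TCP and UDP, the SCTP CRC32c cannot
 * be updated incrementally, so set_sctp() recomputes it over the whole
 * packet before and after the rewrite.  XOR-ing the old, old-correct and
 * new-correct values preserves any pre-existing checksum error instead of
 * silently repairing it.
 */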

static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header.  */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per larger-than-MTU frame; its counterpart
 * ovs_vport_output() is then called once per resulting fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru, __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
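
/* The IPv4 and IPv6 fragmentation paths take the MTU from skb_dst(), so
 * ovs_fragment() attaches a throwaway on-stack dst (DST_NOCOUNT, installed
 * with skb_dst_set_noref()) whose only role is to report the egress vport's
 * MTU via ovs_dst_get_mtu().  The original dst is stashed by prepare_frag()
 * and copied back onto each fragment in ovs_vport_output().
 */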

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = skb->inner_protocol;
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(net, vport, skb, mru, ethertype);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}

			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * user-space action; treat this usage as a special case.
	 * output_userspace() clones the skb it sends to user space, so this
	 * skb will still be consumed by our caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
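
/* A masked-set attribute carries the value immediately followed by an
 * equally sized mask, so, for example, get_mask(a, struct ovs_key_ipv4 *)
 * casts nla_data(a) and advances it by one struct ovs_key_ipv4 to land on
 * the mask half of the payload.
 */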

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action in the list,
		 * so the skb needs to be cloned.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
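
/* A deferred entry queued with actions == NULL, as execute_recirc() does
 * above, tells process_deferred_actions() to hand the skb back to
 * ovs_dp_process_packet() for a fresh flow lookup, with the cloned key's
 * recirc_id distinguishing the new pass.
 */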

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff is wasteful.  The following code is
	 * slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	static const int ovs_recursion_limit = 5;
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > ovs_recursion_limit)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
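
/* Nested invocations (for example, when output to an internal device feeds
 * the packet straight back into the datapath) only bump exec_actions_level;
 * the deferred fifo is drained once, by the outermost invocation at level 1.
 * This keeps recirculation iterative rather than recursive and bounds
 * kernel stack usage.
 */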

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}