xref: /openbmc/linux/net/openvswitch/actions.c (revision bbde9fc1824aab58bc78c084163007dd6c03fe5b)
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
static DEFINE_PER_CPU(int, exec_actions_level);
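
/* Actions such as recirc and sample re-enter action execution.  Rather
 * than recursing (and risking kernel stack exhaustion), nested work is
 * queued on a small per-CPU fifo and drained iteratively by the
 * outermost ovs_execute_actions() call; exec_actions_level tracks the
 * current nesting depth on this CPU.
 */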

static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
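
/* Note that the put side refuses new entries once head reaches
 * DEFERRED_ACTION_FIFO_SIZE - 1, so at most DEFERRED_ACTION_FIFO_SIZE - 1
 * actions can be deferred per packet and the final array slot goes
 * unused.  head and tail only ever grow until action_fifo_init() resets
 * them after the fifo is drained, so no wrap-around handling is needed.
 */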

/* Return a pointer to the newly queued deferred action, or NULL if the
 * fifo is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
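
/* Header-rewriting actions (MPLS push/pop, VLAN pop, ...) call
 * invalidate_flow_key() to mark the cached flow key stale.  A zero
 * EtherType never appears in a valid key, so it doubles as the
 * "re-extract before reuse" sentinel that execute_recirc() checks
 * before recirculating the packet.
 */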

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;

	if (!skb->inner_protocol)
		skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}

/* 'KEY' must not have any bits set outside of the 'MASK' */
#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
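
/* For example, MASKED(0xab, 0x05, 0x0f) == 0x05 | (0xab & 0xf0) == 0xa5:
 * the bits covered by MASK come from KEY and the rest are preserved
 * from OLD.
 */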

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}
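
/* The CHECKSUM_COMPLETE update in set_mpls() uses the usual differential
 * trick: csum_partial() over { ~old_lse, new_lse } yields the checksum
 * delta between the two label stack entries, which is folded into
 * skb->csum instead of re-summing the entire packet.
 */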

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	SET_MASKED(dst[0], src[0], mask[0]);
	SET_MASKED(dst[1], src[1], mask[1]);
	SET_MASKED(dst[2], src[2], mask[2]);
}
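
/* The copy above touches the six address bytes as three 16-bit words,
 * which relies on the usual 2-byte alignment of Ethernet headers inside
 * an skb.
 */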

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
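
/* TCP and UDP checksums cover a pseudo-header that includes the IP
 * addresses, so rewriting an address has to patch the L4 checksum as
 * well.  For UDP over IPv4 a checksum of zero means "no checksum
 * transmitted", which is why a recomputed value of zero is stored as
 * CSUM_MANGLED_0: both encode the same sum, but only the latter marks
 * the checksum as present.
 */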

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = MASKED(old[0], addr[0], mask[0]);
	masked[1] = MASKED(old[1], addr[1], mask[1]);
	masked[2] = MASKED(old[2], addr[2], mask[2]);
	masked[3] = MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* At least one of the masks is non-zero, so do not bother checking them. */
	src = MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
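
/* The XOR in set_sctp() carries checksum errors through: when the packet
 * arrived with a correct CRC (old_csum == old_correct_csum) the result
 * is simply new_csum, and otherwise the original XOR difference is
 * preserved so a corrupted packet still fails verification downstream.
 */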

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get the egress tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
		 a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of the sample action is having a single
	 * user-space action. Treat this usage as a special case.
	 * output_userspace() should clone the skb to be sent to
	 * user space; this skb will then be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);

		/* FIXME: Remove when all vports have been converted */
		OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;

		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
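
/* A masked set action stores the key value immediately followed by an
 * equally sized mask in the same attribute, so advancing the typed
 * payload pointer by one element lands on the mask half.  For example,
 * get_mask(a, struct ovs_key_ipv4 *) returns the ovs_key_ipv4 that
 * directly follows the value in nla_data(a).
 */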

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* The recirc action is not the last action
		 * of the action list, so we need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
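
/* A deferred entry queued with NULL actions (as done above) tells
 * process_deferred_actions() to hand the skb back to
 * ovs_dp_process_packet() for a fresh flow lookup, now using the
 * recirc_id recorded in the cloned flow key.
 */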

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, in which case doing a clone
	 * and then freeing the original skbuff is wasteful.  So the following
	 * code is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	this_cpu_inc(exec_actions_level);
	OVS_CB(skb)->egress_tun_info = NULL;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);
	return err;
}
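
/* Only the outermost invocation (level == 0 on entry) drains the
 * deferred fifo, so recirc and sample actions queued at any nesting
 * depth are executed iteratively here instead of deepening the call
 * stack.
 */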

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}