1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2007-2014 Nicira, Inc.
4  */
5 
6 #include <linux/uaccess.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/if_ether.h>
10 #include <linux/if_vlan.h>
11 #include <net/llc_pdu.h>
12 #include <linux/kernel.h>
13 #include <linux/jhash.h>
14 #include <linux/jiffies.h>
15 #include <linux/llc.h>
16 #include <linux/module.h>
17 #include <linux/in.h>
18 #include <linux/rcupdate.h>
19 #include <linux/cpumask.h>
20 #include <linux/if_arp.h>
21 #include <linux/ip.h>
22 #include <linux/ipv6.h>
23 #include <linux/mpls.h>
24 #include <linux/sctp.h>
25 #include <linux/smp.h>
26 #include <linux/tcp.h>
27 #include <linux/udp.h>
28 #include <linux/icmp.h>
29 #include <linux/icmpv6.h>
30 #include <linux/rculist.h>
31 #include <net/ip.h>
32 #include <net/ip_tunnels.h>
33 #include <net/ipv6.h>
34 #include <net/mpls.h>
35 #include <net/ndisc.h>
36 #include <net/nsh.h>
37 #include <net/pkt_cls.h>
38 #include <net/netfilter/nf_conntrack_zones.h>
39 
40 #include "conntrack.h"
41 #include "datapath.h"
42 #include "flow.h"
43 #include "flow_netlink.h"
44 #include "vport.h"
45 
46 u64 ovs_flow_used_time(unsigned long flow_jiffies)
47 {
48 	struct timespec64 cur_ts;
49 	u64 cur_ms, idle_ms;
50 
51 	ktime_get_ts64(&cur_ts);
52 	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
53 	cur_ms = (u64)(u32)cur_ts.tv_sec * MSEC_PER_SEC +
54 		 cur_ts.tv_nsec / NSEC_PER_MSEC;
55 
56 	return cur_ms - idle_ms;
57 }
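
/* Example with illustrative numbers: with HZ == 1000, a flow whose 'used'
 * timestamp is 5000 jiffies in the past yields idle_ms == 5000.  If
 * ktime_get_ts64() currently reads 1000.250 s, cur_ms == 1000250, so the
 * value returned above is 1000250 - 5000 == 995250 ms on the same
 * monotonic timebase.
 */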
58 
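/* Extract the 16 bits of the TCP header that carry the flags, in network
 * byte order: tcp_flag_word() aliases the 32-bit word at offset 12 of
 * struct tcphdr, and masking its first __be16 with htons(0x0FFF) drops
 * the 4-bit data offset while keeping the low 12 bits (the reserved/NS
 * bits plus CWR, ECE, URG, ACK, PSH, RST, SYN and FIN).
 */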
59 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
60 
61 void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
62 			   const struct sk_buff *skb)
63 {
64 	struct sw_flow_stats *stats;
65 	unsigned int cpu = smp_processor_id();
66 	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
67 
68 	stats = rcu_dereference(flow->stats[cpu]);
69 
70 	/* Check if we already have CPU-specific stats. */
71 	if (likely(stats)) {
72 		spin_lock(&stats->lock);
73 		/* Mark if we write on the pre-allocated stats. */
74 		if (cpu == 0 && unlikely(flow->stats_last_writer != cpu))
75 			flow->stats_last_writer = cpu;
76 	} else {
77 		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
78 		spin_lock(&stats->lock);
79 
80 		/* If the current CPU is the only writer on the
81 		 * pre-allocated stats, keep using them.
82 		 */
83 		if (unlikely(flow->stats_last_writer != cpu)) {
84 			/* A previous locker may have already allocated the
85 			 * stats, so we need to check again.  If CPU-specific
86 			 * stats were already allocated, we update the pre-
87 			 * allocated stats as we have already locked them.
88 			 */
89 			if (likely(flow->stats_last_writer != -1) &&
90 			    likely(!rcu_access_pointer(flow->stats[cpu]))) {
91 				/* Try to allocate CPU-specific stats. */
92 				struct sw_flow_stats *new_stats;
93 
94 				new_stats =
95 					kmem_cache_alloc_node(flow_stats_cache,
96 							      GFP_NOWAIT |
97 							      __GFP_THISNODE |
98 							      __GFP_NOWARN |
99 							      __GFP_NOMEMALLOC,
100 							      numa_node_id());
101 				if (likely(new_stats)) {
102 					new_stats->used = jiffies;
103 					new_stats->packet_count = 1;
104 					new_stats->byte_count = len;
105 					new_stats->tcp_flags = tcp_flags;
106 					spin_lock_init(&new_stats->lock);
107 
108 					rcu_assign_pointer(flow->stats[cpu],
109 							   new_stats);
110 					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
111 					goto unlock;
112 				}
113 			}
114 			flow->stats_last_writer = cpu;
115 		}
116 	}
117 
118 	stats->used = jiffies;
119 	stats->packet_count++;
120 	stats->byte_count += len;
121 	stats->tcp_flags |= tcp_flags;
122 unlock:
123 	spin_unlock(&stats->lock);
124 }
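
/* Flow statistics are kept per CPU: stats[0] is pre-allocated together with
 * the flow, while every other slot starts out NULL.  When a CPU that was not
 * the last writer updates the flow it may try a GFP_NOWAIT, node-local
 * allocation for its own slot; if that fails it simply keeps accounting into
 * the pre-allocated stats[0] under its spinlock, so the packet processing
 * fast path never sleeps.  The readers below walk cpu_used_mask (always
 * including CPU 0) to aggregate or clear the per-CPU counters.
 */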
125 
126 /* Must be called with rcu_read_lock or ovs_mutex. */
127 void ovs_flow_stats_get(const struct sw_flow *flow,
128 			struct ovs_flow_stats *ovs_stats,
129 			unsigned long *used, __be16 *tcp_flags)
130 {
131 	int cpu;
132 
133 	*used = 0;
134 	*tcp_flags = 0;
135 	memset(ovs_stats, 0, sizeof(*ovs_stats));
136 
137 	/* We open code this to make sure cpu 0 is always considered */
138 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
139 		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
140 
141 		if (stats) {
142 			/* Local CPU may write on non-local stats, so we must
143 			 * block bottom-halves here.
144 			 */
145 			spin_lock_bh(&stats->lock);
146 			if (!*used || time_after(stats->used, *used))
147 				*used = stats->used;
148 			*tcp_flags |= stats->tcp_flags;
149 			ovs_stats->n_packets += stats->packet_count;
150 			ovs_stats->n_bytes += stats->byte_count;
151 			spin_unlock_bh(&stats->lock);
152 		}
153 	}
154 }
155 
156 /* Called with ovs_mutex. */
157 void ovs_flow_stats_clear(struct sw_flow *flow)
158 {
159 	int cpu;
160 
161 	/* We open code this to make sure cpu 0 is always considered */
162 	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
163 		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
164 
165 		if (stats) {
166 			spin_lock_bh(&stats->lock);
167 			stats->used = 0;
168 			stats->packet_count = 0;
169 			stats->byte_count = 0;
170 			stats->tcp_flags = 0;
171 			spin_unlock_bh(&stats->lock);
172 		}
173 	}
174 }
175 
176 static int check_header(struct sk_buff *skb, int len)
177 {
178 	if (unlikely(skb->len < len))
179 		return -EINVAL;
180 	if (unlikely(!pskb_may_pull(skb, len)))
181 		return -ENOMEM;
182 	return 0;
183 }
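
/* Callers rely on check_header()'s two error codes meaning different
 * things: -EINVAL says the packet is simply too short to contain the
 * requested header and is usually treated as "header not present" rather
 * than as a fatal error, while -ENOMEM says pskb_may_pull() failed to
 * bring the bytes into the linear area and is propagated as a hard
 * failure by the IPv4/IPv6 paths.
 */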
184 
185 static bool arphdr_ok(struct sk_buff *skb)
186 {
187 	return pskb_may_pull(skb, skb_network_offset(skb) +
188 				  sizeof(struct arp_eth_header));
189 }
190 
191 static int check_iphdr(struct sk_buff *skb)
192 {
193 	unsigned int nh_ofs = skb_network_offset(skb);
194 	unsigned int ip_len;
195 	int err;
196 
197 	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
198 	if (unlikely(err))
199 		return err;
200 
201 	ip_len = ip_hdrlen(skb);
202 	if (unlikely(ip_len < sizeof(struct iphdr) ||
203 		     skb->len < nh_ofs + ip_len))
204 		return -EINVAL;
205 
206 	skb_set_transport_header(skb, nh_ofs + ip_len);
207 	return 0;
208 }
209 
210 static bool tcphdr_ok(struct sk_buff *skb)
211 {
212 	int th_ofs = skb_transport_offset(skb);
213 	int tcp_len;
214 
215 	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
216 		return false;
217 
218 	tcp_len = tcp_hdrlen(skb);
219 	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
220 		     skb->len < th_ofs + tcp_len))
221 		return false;
222 
223 	return true;
224 }
225 
226 static bool udphdr_ok(struct sk_buff *skb)
227 {
228 	return pskb_may_pull(skb, skb_transport_offset(skb) +
229 				  sizeof(struct udphdr));
230 }
231 
232 static bool sctphdr_ok(struct sk_buff *skb)
233 {
234 	return pskb_may_pull(skb, skb_transport_offset(skb) +
235 				  sizeof(struct sctphdr));
236 }
237 
238 static bool icmphdr_ok(struct sk_buff *skb)
239 {
240 	return pskb_may_pull(skb, skb_transport_offset(skb) +
241 				  sizeof(struct icmphdr));
242 }
243 
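/* Parse the IPv6 header and any extension headers at the network offset,
 * filling in the ip/ipv6 parts of @key and setting the transport header
 * offset.  Returns the combined length of the network headers on success,
 * %0 for a non-first fragment (whose upper-layer header is unreachable),
 * or a negative errno value on error.
 */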
244 static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
245 {
246 	unsigned short frag_off;
247 	unsigned int payload_ofs = 0;
248 	unsigned int nh_ofs = skb_network_offset(skb);
249 	unsigned int nh_len;
250 	struct ipv6hdr *nh;
251 	int err, nexthdr, flags = 0;
252 
253 	err = check_header(skb, nh_ofs + sizeof(*nh));
254 	if (unlikely(err))
255 		return err;
256 
257 	nh = ipv6_hdr(skb);
258 
259 	key->ip.proto = NEXTHDR_NONE;
260 	key->ip.tos = ipv6_get_dsfield(nh);
261 	key->ip.ttl = nh->hop_limit;
262 	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
263 	key->ipv6.addr.src = nh->saddr;
264 	key->ipv6.addr.dst = nh->daddr;
265 
266 	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
267 	if (flags & IP6_FH_F_FRAG) {
268 		if (frag_off) {
269 			key->ip.frag = OVS_FRAG_TYPE_LATER;
270 			key->ip.proto = nexthdr;
271 			return 0;
272 		}
273 		key->ip.frag = OVS_FRAG_TYPE_FIRST;
274 	} else {
275 		key->ip.frag = OVS_FRAG_TYPE_NONE;
276 	}
277 
278 	/* Delayed handling of error in ipv6_find_hdr() as it
279 	 * always sets flags and frag_off to a valid value which may be
280 	 * used to set key->ip.frag above.
281 	 */
282 	if (unlikely(nexthdr < 0))
283 		return -EPROTO;
284 
285 	nh_len = payload_ofs - nh_ofs;
286 	skb_set_transport_header(skb, nh_ofs + nh_len);
287 	key->ip.proto = nexthdr;
288 	return nh_len;
289 }
290 
291 static bool icmp6hdr_ok(struct sk_buff *skb)
292 {
293 	return pskb_may_pull(skb, skb_transport_offset(skb) +
294 				  sizeof(struct icmp6hdr));
295 }
296 
297 /**
298  * parse_vlan_tag - Parse vlan tag from vlan header.
299  * @skb: skb containing frame to parse
300  * @key_vh: pointer to parsed vlan tag
301  * @untag_vlan: should the vlan header be removed from the frame
302  *
303  * Return: a negative errno value on a memory or untagging error,
304  * %0 if it encounters a non-vlan or incomplete packet,
305  * %1 after successfully parsing the vlan tag.
306  */
307 static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
308 			  bool untag_vlan)
309 {
310 	struct vlan_head *vh = (struct vlan_head *)skb->data;
311 
312 	if (likely(!eth_type_vlan(vh->tpid)))
313 		return 0;
314 
315 	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
316 		return 0;
317 
318 	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
319 				 sizeof(__be16))))
320 		return -ENOMEM;
321 
322 	vh = (struct vlan_head *)skb->data;
323 	key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
324 	key_vh->tpid = vh->tpid;
325 
326 	if (unlikely(untag_vlan)) {
327 		int offset = skb->data - skb_mac_header(skb);
328 		u16 tci;
329 		int err;
330 
331 		__skb_push(skb, offset);
332 		err = __skb_vlan_pop(skb, &tci);
333 		__skb_pull(skb, offset);
334 		if (err)
335 			return err;
336 		__vlan_hwaccel_put_tag(skb, key_vh->tpid, tci);
337 	} else {
338 		__skb_pull(skb, sizeof(struct vlan_head));
339 	}
340 	return 1;
341 }
342 
343 static void clear_vlan(struct sw_flow_key *key)
344 {
345 	key->eth.vlan.tci = 0;
346 	key->eth.vlan.tpid = 0;
347 	key->eth.cvlan.tci = 0;
348 	key->eth.cvlan.tpid = 0;
349 }
350 
351 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
352 {
353 	int res;
354 
355 	if (skb_vlan_tag_present(skb)) {
356 		key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
357 		key->eth.vlan.tpid = skb->vlan_proto;
358 	} else {
359 		/* Parse outer vlan tag in the non-accelerated case. */
360 		res = parse_vlan_tag(skb, &key->eth.vlan, true);
361 		if (res <= 0)
362 			return res;
363 	}
364 
365 	/* Parse inner vlan tag. */
366 	res = parse_vlan_tag(skb, &key->eth.cvlan, false);
367 	if (res <= 0)
368 		return res;
369 
370 	return 0;
371 }
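
/* Example frame layout handled above (illustrative, 802.1ad outer tag):
 *
 *   dst mac | src mac | 0x88a8 | S-TCI | 0x8100 | C-TCI | ethertype | ...
 *
 * In the non-accelerated case the outer (service) tag ends up in
 * key->eth.vlan and is moved into the skb's VLAN acceleration fields by
 * parse_vlan_tag(..., true); the inner (customer) tag is recorded in
 * key->eth.cvlan and skb->data is advanced past it, leaving the tag bytes
 * in place in the packet.
 */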
372 
373 static __be16 parse_ethertype(struct sk_buff *skb)
374 {
375 	struct llc_snap_hdr {
376 		u8  dsap;  /* Always 0xAA */
377 		u8  ssap;  /* Always 0xAA */
378 		u8  ctrl;
379 		u8  oui[3];
380 		__be16 ethertype;
381 	};
382 	struct llc_snap_hdr *llc;
383 	__be16 proto;
384 
385 	proto = *(__be16 *) skb->data;
386 	__skb_pull(skb, sizeof(__be16));
387 
388 	if (eth_proto_is_802_3(proto))
389 		return proto;
390 
391 	if (skb->len < sizeof(struct llc_snap_hdr))
392 		return htons(ETH_P_802_2);
393 
394 	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
395 		return htons(0);
396 
397 	llc = (struct llc_snap_hdr *) skb->data;
398 	if (llc->dsap != LLC_SAP_SNAP ||
399 	    llc->ssap != LLC_SAP_SNAP ||
400 	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
401 		return htons(ETH_P_802_2);
402 
403 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
404 
405 	if (eth_proto_is_802_3(llc->ethertype))
406 		return llc->ethertype;
407 
408 	return htons(ETH_P_802_2);
409 }
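
/* parse_ethertype() consumes the 2-byte type/length field (and, when
 * present, a following LLC/SNAP header) and returns the effective
 * ethertype: a value of ETH_P_802_3_MIN or above is a real ethertype and
 * is returned as-is; an 802.3 length followed by an LLC/SNAP header with
 * a zero OUI yields the SNAP-encapsulated ethertype (if that is itself a
 * valid ethertype); anything else is reported as ETH_P_802_2, with
 * htons(0) reserved for the case where the SNAP header could not be
 * pulled.
 */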
410 
411 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
412 			int nh_len)
413 {
414 	struct icmp6hdr *icmp = icmp6_hdr(skb);
415 
416 	/* The ICMPv6 type and code fields use the 16-bit transport port
417 	 * fields, so we need to store them in 16-bit network byte order.
418 	 */
419 	key->tp.src = htons(icmp->icmp6_type);
420 	key->tp.dst = htons(icmp->icmp6_code);
421 	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
422 
423 	if (icmp->icmp6_code == 0 &&
424 	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
425 	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
426 		int icmp_len = skb->len - skb_transport_offset(skb);
427 		struct nd_msg *nd;
428 		int offset;
429 
430 		/* In order to process neighbor discovery options, we need the
431 		 * entire packet.
432 		 */
433 		if (unlikely(icmp_len < sizeof(*nd)))
434 			return 0;
435 
436 		if (unlikely(skb_linearize(skb)))
437 			return -ENOMEM;
438 
439 		nd = (struct nd_msg *)skb_transport_header(skb);
440 		key->ipv6.nd.target = nd->target;
441 
442 		icmp_len -= sizeof(*nd);
443 		offset = 0;
444 		while (icmp_len >= 8) {
445 			struct nd_opt_hdr *nd_opt =
446 				 (struct nd_opt_hdr *)(nd->opt + offset);
447 			int opt_len = nd_opt->nd_opt_len * 8;
448 
449 			if (unlikely(!opt_len || opt_len > icmp_len))
450 				return 0;
451 
452 			/* Store the link layer address if the appropriate
453 			 * option is provided.  It is considered an error if
454 			 * the same link layer option is specified twice.
455 			 */
456 			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
457 			    && opt_len == 8) {
458 				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
459 					goto invalid;
460 				ether_addr_copy(key->ipv6.nd.sll,
461 						&nd->opt[offset+sizeof(*nd_opt)]);
462 			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
463 				   && opt_len == 8) {
464 				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
465 					goto invalid;
466 				ether_addr_copy(key->ipv6.nd.tll,
467 						&nd->opt[offset+sizeof(*nd_opt)]);
468 			}
469 
470 			icmp_len -= opt_len;
471 			offset += opt_len;
472 		}
473 	}
474 
475 	return 0;
476 
477 invalid:
478 	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
479 	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
480 	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
481 
482 	return 0;
483 }
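
/* Note on the neighbour discovery parsing above: the source/target
 * link-layer address options are captured only from 8-byte (single unit)
 * options, and a duplicated option is not a hard error; the nd target,
 * sll and tll fields are zeroed again so the flow key does not carry
 * ambiguous values.
 */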
484 
485 static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key)
486 {
487 	struct nshhdr *nh;
488 	unsigned int nh_ofs = skb_network_offset(skb);
489 	u8 version, length;
490 	int err;
491 
492 	err = check_header(skb, nh_ofs + NSH_BASE_HDR_LEN);
493 	if (unlikely(err))
494 		return err;
495 
496 	nh = nsh_hdr(skb);
497 	version = nsh_get_ver(nh);
498 	length = nsh_hdr_len(nh);
499 
500 	if (version != 0)
501 		return -EINVAL;
502 
503 	err = check_header(skb, nh_ofs + length);
504 	if (unlikely(err))
505 		return err;
506 
507 	nh = nsh_hdr(skb);
508 	key->nsh.base.flags = nsh_get_flags(nh);
509 	key->nsh.base.ttl = nsh_get_ttl(nh);
510 	key->nsh.base.mdtype = nh->mdtype;
511 	key->nsh.base.np = nh->np;
512 	key->nsh.base.path_hdr = nh->path_hdr;
513 	switch (key->nsh.base.mdtype) {
514 	case NSH_M_TYPE1:
515 		if (length != NSH_M_TYPE1_LEN)
516 			return -EINVAL;
517 		memcpy(key->nsh.context, nh->md1.context,
518 		       sizeof(nh->md1));
519 		break;
520 	case NSH_M_TYPE2:
521 		memset(key->nsh.context, 0,
522 		       sizeof(nh->md1));
523 		break;
524 	default:
525 		return -EINVAL;
526 	}
527 
528 	return 0;
529 }
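
/* For MD type 1 the fixed 16 bytes of context headers are copied into the
 * key; for MD type 2 the variable-length metadata is not parsed and the
 * context words are cleared so the key never matches on stale data.
 */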
530 
531 /**
532  * key_extract_l3l4 - extracts L3/L4 header information.
533  * @skb: sk_buff that contains the frame, with skb->data pointing to the
534  *       L3 header
535  * @key: output flow key
536  *
537  * Return: %0 if successful, otherwise a negative errno value.
538  */
539 static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
540 {
541 	int error;
542 
543 	/* Network layer. */
544 	if (key->eth.type == htons(ETH_P_IP)) {
545 		struct iphdr *nh;
546 		__be16 offset;
547 
548 		error = check_iphdr(skb);
549 		if (unlikely(error)) {
550 			memset(&key->ip, 0, sizeof(key->ip));
551 			memset(&key->ipv4, 0, sizeof(key->ipv4));
552 			if (error == -EINVAL) {
553 				skb->transport_header = skb->network_header;
554 				error = 0;
555 			}
556 			return error;
557 		}
558 
559 		nh = ip_hdr(skb);
560 		key->ipv4.addr.src = nh->saddr;
561 		key->ipv4.addr.dst = nh->daddr;
562 
563 		key->ip.proto = nh->protocol;
564 		key->ip.tos = nh->tos;
565 		key->ip.ttl = nh->ttl;
566 
567 		offset = nh->frag_off & htons(IP_OFFSET);
568 		if (offset) {
569 			key->ip.frag = OVS_FRAG_TYPE_LATER;
570 			memset(&key->tp, 0, sizeof(key->tp));
571 			return 0;
572 		}
573 		if (nh->frag_off & htons(IP_MF) ||
574 			skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
575 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
576 		else
577 			key->ip.frag = OVS_FRAG_TYPE_NONE;
578 
579 		/* Transport layer. */
580 		if (key->ip.proto == IPPROTO_TCP) {
581 			if (tcphdr_ok(skb)) {
582 				struct tcphdr *tcp = tcp_hdr(skb);
583 				key->tp.src = tcp->source;
584 				key->tp.dst = tcp->dest;
585 				key->tp.flags = TCP_FLAGS_BE16(tcp);
586 			} else {
587 				memset(&key->tp, 0, sizeof(key->tp));
588 			}
589 
590 		} else if (key->ip.proto == IPPROTO_UDP) {
591 			if (udphdr_ok(skb)) {
592 				struct udphdr *udp = udp_hdr(skb);
593 				key->tp.src = udp->source;
594 				key->tp.dst = udp->dest;
595 			} else {
596 				memset(&key->tp, 0, sizeof(key->tp));
597 			}
598 		} else if (key->ip.proto == IPPROTO_SCTP) {
599 			if (sctphdr_ok(skb)) {
600 				struct sctphdr *sctp = sctp_hdr(skb);
601 				key->tp.src = sctp->source;
602 				key->tp.dst = sctp->dest;
603 			} else {
604 				memset(&key->tp, 0, sizeof(key->tp));
605 			}
606 		} else if (key->ip.proto == IPPROTO_ICMP) {
607 			if (icmphdr_ok(skb)) {
608 				struct icmphdr *icmp = icmp_hdr(skb);
609 				/* The ICMP type and code fields use the 16-bit
610 				 * transport port fields, so we need to store
611 				 * them in 16-bit network byte order. */
612 				key->tp.src = htons(icmp->type);
613 				key->tp.dst = htons(icmp->code);
614 			} else {
615 				memset(&key->tp, 0, sizeof(key->tp));
616 			}
617 		}
618 
619 	} else if (key->eth.type == htons(ETH_P_ARP) ||
620 		   key->eth.type == htons(ETH_P_RARP)) {
621 		struct arp_eth_header *arp;
622 		bool arp_available = arphdr_ok(skb);
623 
624 		arp = (struct arp_eth_header *)skb_network_header(skb);
625 
626 		if (arp_available &&
627 		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
628 		    arp->ar_pro == htons(ETH_P_IP) &&
629 		    arp->ar_hln == ETH_ALEN &&
630 		    arp->ar_pln == 4) {
631 
632 			/* We only match on the lower 8 bits of the opcode. */
633 			if (ntohs(arp->ar_op) <= 0xff)
634 				key->ip.proto = ntohs(arp->ar_op);
635 			else
636 				key->ip.proto = 0;
637 
638 			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
639 			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
640 			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
641 			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
642 		} else {
643 			memset(&key->ip, 0, sizeof(key->ip));
644 			memset(&key->ipv4, 0, sizeof(key->ipv4));
645 		}
646 	} else if (eth_p_mpls(key->eth.type)) {
647 		u8 label_count = 1;
648 
649 		memset(&key->mpls, 0, sizeof(key->mpls));
650 		skb_set_inner_network_header(skb, skb->mac_len);
651 		while (1) {
652 			__be32 lse;
653 
654 			error = check_header(skb, skb->mac_len +
655 					     label_count * MPLS_HLEN);
656 			if (unlikely(error))
657 				return 0;
658 
659 			memcpy(&lse, skb_inner_network_header(skb), MPLS_HLEN);
660 
661 			if (label_count <= MPLS_LABEL_DEPTH)
662 				memcpy(&key->mpls.lse[label_count - 1], &lse,
663 				       MPLS_HLEN);
664 
665 			skb_set_inner_network_header(skb, skb->mac_len +
666 						     label_count * MPLS_HLEN);
667 			if (lse & htonl(MPLS_LS_S_MASK))
668 				break;
669 
670 			label_count++;
671 		}
672 		if (label_count > MPLS_LABEL_DEPTH)
673 			label_count = MPLS_LABEL_DEPTH;
674 
675 		key->mpls.num_labels_mask = GENMASK(label_count - 1, 0);
676 	} else if (key->eth.type == htons(ETH_P_IPV6)) {
677 		int nh_len;             /* IPv6 Header + Extensions */
678 
679 		nh_len = parse_ipv6hdr(skb, key);
680 		if (unlikely(nh_len < 0)) {
681 			switch (nh_len) {
682 			case -EINVAL:
683 				memset(&key->ip, 0, sizeof(key->ip));
684 				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
685 				fallthrough;
686 			case -EPROTO:
687 				skb->transport_header = skb->network_header;
688 				error = 0;
689 				break;
690 			default:
691 				error = nh_len;
692 			}
693 			return error;
694 		}
695 
696 		if (key->ip.frag == OVS_FRAG_TYPE_LATER) {
697 			memset(&key->tp, 0, sizeof(key->tp));
698 			return 0;
699 		}
700 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
701 			key->ip.frag = OVS_FRAG_TYPE_FIRST;
702 
703 		/* Transport layer. */
704 		if (key->ip.proto == NEXTHDR_TCP) {
705 			if (tcphdr_ok(skb)) {
706 				struct tcphdr *tcp = tcp_hdr(skb);
707 				key->tp.src = tcp->source;
708 				key->tp.dst = tcp->dest;
709 				key->tp.flags = TCP_FLAGS_BE16(tcp);
710 			} else {
711 				memset(&key->tp, 0, sizeof(key->tp));
712 			}
713 		} else if (key->ip.proto == NEXTHDR_UDP) {
714 			if (udphdr_ok(skb)) {
715 				struct udphdr *udp = udp_hdr(skb);
716 				key->tp.src = udp->source;
717 				key->tp.dst = udp->dest;
718 			} else {
719 				memset(&key->tp, 0, sizeof(key->tp));
720 			}
721 		} else if (key->ip.proto == NEXTHDR_SCTP) {
722 			if (sctphdr_ok(skb)) {
723 				struct sctphdr *sctp = sctp_hdr(skb);
724 				key->tp.src = sctp->source;
725 				key->tp.dst = sctp->dest;
726 			} else {
727 				memset(&key->tp, 0, sizeof(key->tp));
728 			}
729 		} else if (key->ip.proto == NEXTHDR_ICMP) {
730 			if (icmp6hdr_ok(skb)) {
731 				error = parse_icmpv6(skb, key, nh_len);
732 				if (error)
733 					return error;
734 			} else {
735 				memset(&key->tp, 0, sizeof(key->tp));
736 			}
737 		}
738 	} else if (key->eth.type == htons(ETH_P_NSH)) {
739 		error = parse_nsh(skb, key);
740 		if (error)
741 			return error;
742 	}
743 	return 0;
744 }
745 
746 /**
747  * key_extract - extracts a flow key from an Ethernet frame.
748  * @skb: sk_buff that contains the frame, with skb->data pointing to the
749  * Ethernet header
750  * @key: output flow key
751  *
752  * The caller must ensure that skb->len >= ETH_HLEN.
753  *
754  * Initializes @skb header fields as follows:
755  *
756  *    - skb->mac_header: the L2 header.
757  *
758  *    - skb->network_header: just past the L2 header, or just past the
759  *      VLAN header, to the first byte of the L2 payload.
760  *
761  *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
762  *      on output, then just past the IP header, if one is present and
763  *      of a correct length, otherwise the same as skb->network_header.
764  *      For other key->eth.type values it is left untouched.
765  *
766  *    - skb->protocol: the type of the data starting at skb->network_header.
767  *      Equals to key->eth.type.
768  *
769  * Return: %0 if successful, otherwise a negative errno value.
770  */
771 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
772 {
773 	struct ethhdr *eth;
774 
775 	/* Flags are always used as part of stats */
776 	key->tp.flags = 0;
777 
778 	skb_reset_mac_header(skb);
779 
780 	/* Link layer. */
781 	clear_vlan(key);
782 	if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) {
783 		if (unlikely(eth_type_vlan(skb->protocol)))
784 			return -EINVAL;
785 
786 		skb_reset_network_header(skb);
787 		key->eth.type = skb->protocol;
788 	} else {
789 		eth = eth_hdr(skb);
790 		ether_addr_copy(key->eth.src, eth->h_source);
791 		ether_addr_copy(key->eth.dst, eth->h_dest);
792 
793 		__skb_pull(skb, 2 * ETH_ALEN);
794 		/* We are going to push all headers that we pull, so no need to
795 		 * update skb->csum here.
796 		 */
797 
798 		if (unlikely(parse_vlan(skb, key)))
799 			return -ENOMEM;
800 
801 		key->eth.type = parse_ethertype(skb);
802 		if (unlikely(key->eth.type == htons(0)))
803 			return -ENOMEM;
804 
805 		/* Multiply-tagged packets need to retain the TPID to satisfy
806 		 * skb_vlan_pop(), which will later shift the ethertype into
807 		 * skb->protocol.
808 		 */
809 		if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
810 			skb->protocol = key->eth.cvlan.tpid;
811 		else
812 			skb->protocol = key->eth.type;
813 
814 		skb_reset_network_header(skb);
815 		__skb_push(skb, skb->data - skb_mac_header(skb));
816 	}
817 
818 	skb_reset_mac_len(skb);
819 
820 	/* Fill out L3/L4 key info, if any */
821 	return key_extract_l3l4(skb, key);
822 }
823 
824 /* The conntrack fragment handling code expects packets that start at the
825  * L3 header, so provide a helper that extracts only the L3/L4 key fields.
826  */
827 int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
828 {
829 	return key_extract_l3l4(skb, key);
830 }
831 
832 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
833 {
834 	int res;
835 
836 	res = key_extract(skb, key);
837 	if (!res)
838 		key->mac_proto &= ~SW_FLOW_KEY_INVALID;
839 
840 	return res;
841 }
842 
843 static int key_extract_mac_proto(struct sk_buff *skb)
844 {
845 	switch (skb->dev->type) {
846 	case ARPHRD_ETHER:
847 		return MAC_PROTO_ETHERNET;
848 	case ARPHRD_NONE:
849 		if (skb->protocol == htons(ETH_P_TEB))
850 			return MAC_PROTO_ETHERNET;
851 		return MAC_PROTO_NONE;
852 	}
853 	WARN_ON_ONCE(1);
854 	return -EINVAL;
855 }
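
/* ARPHRD_NONE covers layer-3 style devices (for example some tunnel
 * netdevices) whose packets carry no Ethernet header, hence
 * MAC_PROTO_NONE; the exception is ETH_P_TEB, where the payload is a
 * transparent Ethernet bridging frame and an inner Ethernet header is
 * expected.
 */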
856 
857 int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
858 			 struct sk_buff *skb, struct sw_flow_key *key)
859 {
860 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
861 	struct tc_skb_ext *tc_ext;
862 #endif
863 	bool post_ct = false, post_ct_snat = false, post_ct_dnat = false;
864 	int res, err;
865 	u16 zone = 0;
866 
867 	/* Extract metadata from packet. */
868 	if (tun_info) {
869 		key->tun_proto = ip_tunnel_info_af(tun_info);
870 		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
871 
872 		if (tun_info->options_len) {
873 			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
874 						   8)) - 1
875 					> sizeof(key->tun_opts));
876 
877 			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
878 						tun_info);
879 			key->tun_opts_len = tun_info->options_len;
880 		} else {
881 			key->tun_opts_len = 0;
882 		}
883 	} else  {
884 		key->tun_proto = 0;
885 		key->tun_opts_len = 0;
886 		memset(&key->tun_key, 0, sizeof(key->tun_key));
887 	}
888 
889 	key->phy.priority = skb->priority;
890 	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
891 	key->phy.skb_mark = skb->mark;
892 	key->ovs_flow_hash = 0;
893 	res = key_extract_mac_proto(skb);
894 	if (res < 0)
895 		return res;
896 	key->mac_proto = res;
897 
898 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
899 	if (tc_skb_ext_tc_enabled()) {
900 		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
901 		key->recirc_id = tc_ext ? tc_ext->chain : 0;
902 		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
903 		post_ct = tc_ext ? tc_ext->post_ct : false;
904 		post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
905 		post_ct_dnat = post_ct ? tc_ext->post_ct_dnat : false;
906 		zone = post_ct ? tc_ext->zone : 0;
907 	} else {
908 		key->recirc_id = 0;
909 	}
910 #else
911 	key->recirc_id = 0;
912 #endif
913 
914 	err = key_extract(skb, key);
915 	if (!err) {
916 		ovs_ct_fill_key(skb, key, post_ct);   /* Must be after key_extract(). */
917 		if (post_ct) {
918 			if (!skb_get_nfct(skb)) {
919 				key->ct_zone = zone;
920 			} else {
921 				if (!post_ct_dnat)
922 					key->ct_state &= ~OVS_CS_F_DST_NAT;
923 				if (!post_ct_snat)
924 					key->ct_state &= ~OVS_CS_F_SRC_NAT;
925 			}
926 		}
927 	}
928 	return err;
929 }
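
/* When the packet was already processed by TC before reaching the
 * datapath (e.g. a conntrack action run from a tc filter), the tc_skb_ext
 * above supplies the chain index used as recirc_id, the MRU and the
 * conntrack progress flags.  The post_ct handling keeps the remembered
 * zone in the key if no connection entry is attached to the skb, and
 * clears the SRC/DST NAT state bits for NAT steps that have not been
 * performed yet.
 */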
930 
931 int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
932 				   struct sk_buff *skb,
933 				   struct sw_flow_key *key, bool log)
934 {
935 	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
936 	u64 attrs = 0;
937 	int err;
938 
939 	err = parse_flow_nlattrs(attr, a, &attrs, log);
940 	if (err)
941 		return -EINVAL;
942 
943 	/* Extract metadata from netlink attributes. */
944 	err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
945 	if (err)
946 		return err;
947 
948 	/* key_extract assumes that skb->protocol is set up for
949 	 * layer 3 packets, which is the case for other callers,
950 	 * in particular packets received from the network stack.
951 	 * Here the correct value can be set from the metadata
952 	 * extracted above.
953 	 * For an L2 packet the key's eth type will be zero; skb->protocol
954 	 * is then set to the correct value later, during key_extract().
955 	 */
956 
957 	skb->protocol = key->eth.type;
958 	err = key_extract(skb, key);
959 	if (err)
960 		return err;
961 
962 	/* Check that we have conntrack original direction tuple metadata only
963 	 * for packets for which it makes sense.  Otherwise the key may be
964 	 * corrupted due to overlapping key fields.
965 	 */
966 	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
967 	    key->eth.type != htons(ETH_P_IP))
968 		return -EINVAL;
969 	if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
970 	    (key->eth.type != htons(ETH_P_IPV6) ||
971 	     sw_flow_key_is_nd(key)))
972 		return -EINVAL;
973 
974 	return 0;
975 }
976