// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/filter.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/gre.h>
#include <net/pptp.h>
#include <net/tipc.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <linux/mpls.h>
#include <linux/tcp.h>
#include <linux/ptp_classify.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
#include <uapi/linux/batadv_packet.h>
#include <linux/bpf.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_labels.h>
#endif
#include <linux/bpf-netns.h>

static void dissector_set_key(struct flow_dissector *flow_dissector,
			      enum flow_dissector_key_id key_id)
{
	flow_dissector->used_keys |= (1 << key_id);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count)
{
	unsigned int i;

	memset(flow_dissector, 0, sizeof(*flow_dissector));

	for (i = 0; i < key_count; i++, key++) {
		/* The user must ensure that every key's target offset fits
		 * within the bounds of an unsigned short.
		 */
		BUG_ON(key->offset > USHRT_MAX);
		BUG_ON(dissector_uses_key(flow_dissector,
					  key->key_id));

		dissector_set_key(flow_dissector, key->key_id);
		flow_dissector->offset[key->key_id] = key->offset;
	}

	/* Ensure that the dissector always includes the control and basic
	 * keys. That way we avoid having to handle their absence in the
	 * fast path.
	 */
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_CONTROL));
	BUG_ON(!dissector_uses_key(flow_dissector,
				   FLOW_DISSECTOR_KEY_BASIC));
}
EXPORT_SYMBOL(skb_flow_dissector_init);
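
/* Example (illustrative sketch, not part of the kernel API contract): a
 * caller builds a dissector from an array of keys whose offsets point into
 * its own result structure, exactly as init_default_flow_dissectors() does
 * at the bottom of this file. The control and basic keys are mandatory:
 *
 *	static const struct flow_dissector_key my_keys[] = {
 *		{ .key_id = FLOW_DISSECTOR_KEY_CONTROL,
 *		  .offset = offsetof(struct flow_keys, control) },
 *		{ .key_id = FLOW_DISSECTOR_KEY_BASIC,
 *		  .offset = offsetof(struct flow_keys, basic) },
 *	};
 *	static struct flow_dissector my_dissector;
 *
 *	skb_flow_dissector_init(&my_dissector, my_keys, ARRAY_SIZE(my_keys));
 */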

#ifdef CONFIG_BPF_SYSCALL
int flow_dissector_bpf_prog_attach_check(struct net *net,
					 struct bpf_prog *prog)
{
	enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;

	if (net == &init_net) {
		/* BPF flow dissector in the root namespace overrides
		 * any per-net-namespace one. When attaching to root,
		 * make sure we don't have any BPF program attached
		 * to the non-root namespaces.
		 */
		struct net *ns;

		for_each_net(ns) {
			if (ns == &init_net)
				continue;
			if (rcu_access_pointer(ns->bpf.run_array[type]))
				return -EEXIST;
		}
	} else {
		/* Make sure root flow dissector is not attached
		 * when attaching to the non-root namespace.
		 */
		if (rcu_access_pointer(init_net.bpf.run_array[type]))
			return -EEXIST;
	}

	return 0;
}
#endif /* CONFIG_BPF_SYSCALL */

/**
 * __skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: sk_buff to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 *
 * The function will try to retrieve the ports at offset thoff + poff, where
 * poff is the protocol port offset returned by proto_ports_offset().
 */
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen)
{
	int poff = proto_ports_offset(ip_proto);

	if (!data) {
		data = skb->data;
		hlen = skb_headlen(skb);
	}

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = __skb_header_pointer(skb, thoff + poff,
					     sizeof(_ports), data, hlen, &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(__skb_flow_get_ports);
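
/* Example (illustrative sketch): given a dissected transport header offset,
 * the L4 port pair can be read in one call; storing the returned __be32 in
 * the flow_dissector_key_ports union exposes the individual network-order
 * ports, which is exactly what __skb_flow_dissect_ports() below relies on:
 *
 *	struct flow_dissector_key_ports p;
 *
 *	p.ports = __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
 *
 * after which p.src and p.dst hold the source/destination ports. Passing
 * data == NULL makes the function fall back to skb->data/skb_headlen(skb).
 */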

static bool icmp_has_id(u8 type)
{
	switch (type) {
	case ICMP_ECHO:
	case ICMP_ECHOREPLY:
	case ICMP_TIMESTAMP:
	case ICMP_TIMESTAMPREPLY:
	case ICMPV6_ECHO_REQUEST:
	case ICMPV6_ECHO_REPLY:
		return true;
	}

	return false;
}

/**
 * skb_flow_get_icmp_tci - extract ICMP(6) Type, Code and Identifier fields
 * @skb: sk_buff to extract from
 * @key_icmp: struct flow_dissector_key_icmp to fill
 * @data: raw buffer pointer to the packet
 * @thoff: offset to extract at
 * @hlen: packet header length
 */
void skb_flow_get_icmp_tci(const struct sk_buff *skb,
			   struct flow_dissector_key_icmp *key_icmp,
			   const void *data, int thoff, int hlen)
{
	struct icmphdr *ih, _ih;

	ih = __skb_header_pointer(skb, thoff, sizeof(_ih), data, hlen, &_ih);
	if (!ih)
		return;

	key_icmp->type = ih->type;
	key_icmp->code = ih->code;

	/* We use 0 to signal that the Id field is not present, so remap a
	 * genuine identifier of 0 to 1 to avoid confusing it with packets
	 * that have no such field.
	 */
	if (icmp_has_id(ih->type))
		key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
	else
		key_icmp->id = 0;
}
EXPORT_SYMBOL(skb_flow_get_icmp_tci);
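
/* Worked example (illustrative): for an ICMP echo request carrying
 * identifier 0x1234, the key is filled as
 *
 *	key_icmp->type == ICMP_ECHO, key_icmp->code == 0,
 *	key_icmp->id == 0x1234
 *
 * An on-the-wire identifier of 0 is reported as 1, so that id == 0 always
 * means "this message type has no Identifier field".
 */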

/* If FLOW_DISSECTOR_KEY_ICMP is set, dissect an ICMP packet
 * using skb_flow_get_icmp_tci().
 */
static void __skb_flow_dissect_icmp(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, const void *data,
				    int thoff, int hlen)
{
	struct flow_dissector_key_icmp *key_icmp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ICMP))
		return;

	key_icmp = skb_flow_dissector_target(flow_dissector,
					     FLOW_DISSECTOR_KEY_ICMP,
					     target_container);

	skb_flow_get_icmp_tci(skb, key_icmp, data, thoff, hlen);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_meta *meta;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_META))
		return;

	meta = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_META,
					 target_container);
	meta->ingress_ifindex = skb->skb_iif;
}
EXPORT_SYMBOL(skb_flow_dissect_meta);

static void
skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type,
				   struct flow_dissector *flow_dissector,
				   void *target_container)
{
	struct flow_dissector_key_control *ctrl;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return;

	ctrl = skb_flow_dissector_target(flow_dissector,
					 FLOW_DISSECTOR_KEY_ENC_CONTROL,
					 target_container);
	ctrl->addr_type = type;
}

void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container, u16 *ctinfo_map,
		    size_t mapsize, bool post_ct, u16 zone)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	struct flow_dissector_key_ct *key;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_labels *cl;
	struct nf_conn *ct;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_CT))
		return;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct && !post_ct)
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_CT,
					target_container);

	if (!ct) {
		key->ct_state = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
				TCA_FLOWER_KEY_CT_FLAGS_INVALID;
		key->ct_zone = zone;
		return;
	}

	if (ctinfo < mapsize)
		key->ct_state = ctinfo_map[ctinfo];
#if IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)
	key->ct_zone = ct->zone.id;
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	key->ct_mark = ct->mark;
#endif

	cl = nf_ct_labels_find(ct);
	if (cl)
		memcpy(key->ct_labels, cl->bits, sizeof(key->ct_labels));
#endif /* CONFIG_NF_CONNTRACK */
}
EXPORT_SYMBOL(skb_flow_dissect_ct);
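
/* Example (illustrative sketch, mirroring how the flower classifier builds
 * its map; the enum values and TCA_FLOWER_KEY_CT_FLAGS_* names come from
 * <net/netfilter/nf_conntrack_common.h> and <uapi/linux/pkt_cls.h>):
 *
 *	static const u16 ct_info_map[] = {
 *		[IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *				      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
 *		[IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
 *			      TCA_FLOWER_KEY_CT_FLAGS_NEW,
 *	};
 *
 *	skb_flow_dissect_ct(skb, flow_dissector, target_container,
 *			    ct_info_map, ARRAY_SIZE(ct_info_map), false, 0);
 *
 * A ctinfo at or beyond mapsize leaves key->ct_state untouched.
 */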

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container)
{
	struct ip_tunnel_info *info;
	struct ip_tunnel_key *key;

	/* A quick check to see if there might be something to do. */
	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_KEYID) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_CONTROL) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_PORTS) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_IP) &&
	    !dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_ENC_OPTS))
		return;

	info = skb_tunnel_info(skb);
	if (!info)
		return;

	key = &info->key;

	switch (ip_tunnel_info_af(info)) {
	case AF_INET:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
			struct flow_dissector_key_ipv4_addrs *ipv4;

			ipv4 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
							 target_container);
			ipv4->src = key->u.ipv4.src;
			ipv4->dst = key->u.ipv4.dst;
		}
		break;
	case AF_INET6:
		skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						   flow_dissector,
						   target_container);
		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
			struct flow_dissector_key_ipv6_addrs *ipv6;

			ipv6 = skb_flow_dissector_target(flow_dissector,
							 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
							 target_container);
			ipv6->src = key->u.ipv6.src;
			ipv6->dst = key->u.ipv6.dst;
		}
		break;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *keyid;

		keyid = skb_flow_dissector_target(flow_dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  target_container);
		keyid->keyid = tunnel_id_to_key32(key->tun_id);
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *tp;

		tp = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_PORTS,
					       target_container);
		tp->src = key->tp_src;
		tp->dst = key->tp_dst;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *ip;

		ip = skb_flow_dissector_target(flow_dissector,
					       FLOW_DISSECTOR_KEY_ENC_IP,
					       target_container);
		ip->tos = key->tos;
		ip->ttl = key->ttl;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
		struct flow_dissector_key_enc_opts *enc_opt;

		enc_opt = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ENC_OPTS,
						    target_container);

		if (info->options_len) {
			enc_opt->len = info->options_len;
			ip_tunnel_info_opts_get(enc_opt->data, info);
			enc_opt->dst_opt_type = info->key.tun_flags &
						TUNNEL_OPTIONS_PRESENT;
		}
	}
}
EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);

void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container)
{
	struct flow_dissector_key_hash *key;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
		return;

	key = skb_flow_dissector_target(flow_dissector,
					FLOW_DISSECTOR_KEY_HASH,
					target_container);

	key->hash = skb_get_hash_raw(skb);
}
EXPORT_SYMBOL(skb_flow_dissect_hash);

static enum flow_dissect_ret
__skb_flow_dissect_mpls(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data, int nhoff,
			int hlen, int lse_index, bool *entropy_label)
{
	struct mpls_label *hdr, _hdr;
	u32 entry, label, bos;

	if (!dissector_uses_key(flow_dissector,
				FLOW_DISSECTOR_KEY_MPLS_ENTROPY) &&
	    !dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS))
		return FLOW_DISSECT_RET_OUT_GOOD;

	if (lse_index >= FLOW_DIS_MPLS_MAX)
		return FLOW_DISSECT_RET_OUT_GOOD;

	hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
				   hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	entry = ntohl(hdr->entry);
	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT;
	bos = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		struct flow_dissector_key_mpls *key_mpls;
		struct flow_dissector_mpls_lse *lse;

		key_mpls = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_MPLS,
						     target_container);
		lse = &key_mpls->ls[lse_index];

		lse->mpls_ttl = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
		lse->mpls_bos = bos;
		lse->mpls_tc = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT;
		lse->mpls_label = label;
		dissector_set_mpls_lse(key_mpls, lse_index);
	}

	if (*entropy_label &&
	    dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_MPLS_ENTROPY)) {
		struct flow_dissector_key_keyid *key_keyid;

		key_keyid = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_MPLS_ENTROPY,
						      target_container);
		key_keyid->keyid = cpu_to_be32(label);
	}

	*entropy_label = label == MPLS_LABEL_ENTROPY;

	return bos ? FLOW_DISSECT_RET_OUT_GOOD : FLOW_DISSECT_RET_PROTO_AGAIN;
}
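
/* Worked example (illustrative): an MPLS label stack entry of 0x0001f1ff
 * decodes, using the masks and shifts from <uapi/linux/mpls.h>, as
 *
 *	label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT = 31
 *	tc    = (entry & MPLS_LS_TC_MASK) >> MPLS_LS_TC_SHIFT       = 0
 *	bos   = (entry & MPLS_LS_S_MASK) >> MPLS_LS_S_SHIFT         = 1
 *	ttl   = (entry & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT     = 255
 *
 * and because bos == 1 the function returns FLOW_DISSECT_RET_OUT_GOOD
 * instead of requesting another pass over the stack.
 */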

static enum flow_dissect_ret
__skb_flow_dissect_arp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int nhoff, int hlen)
{
	struct flow_dissector_key_arp *key_arp;
	struct {
		unsigned char ar_sha[ETH_ALEN];
		unsigned char ar_sip[4];
		unsigned char ar_tha[ETH_ALEN];
		unsigned char ar_tip[4];
	} *arp_eth, _arp_eth;
	const struct arphdr *arp;
	struct arphdr _arp;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP))
		return FLOW_DISSECT_RET_OUT_GOOD;

	arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
				   hlen, &_arp);
	if (!arp)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_hln != ETH_ALEN ||
	    arp->ar_pln != 4 ||
	    (arp->ar_op != htons(ARPOP_REPLY) &&
	     arp->ar_op != htons(ARPOP_REQUEST)))
		return FLOW_DISSECT_RET_OUT_BAD;

	arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
				       sizeof(_arp_eth), data,
				       hlen, &_arp_eth);
	if (!arp_eth)
		return FLOW_DISSECT_RET_OUT_BAD;

	key_arp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_ARP,
					    target_container);

	memcpy(&key_arp->sip, arp_eth->ar_sip, sizeof(key_arp->sip));
	memcpy(&key_arp->tip, arp_eth->ar_tip, sizeof(key_arp->tip));

	/* Only store the lower byte of the opcode;
	 * this covers ARPOP_REPLY and ARPOP_REQUEST.
	 */
	key_arp->op = ntohs(arp->ar_op) & 0xff;

	ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
	ether_addr_copy(key_arp->tha, arp_eth->ar_tha);

	return FLOW_DISSECT_RET_OUT_GOOD;
}

static enum flow_dissect_ret
__skb_flow_dissect_gre(const struct sk_buff *skb,
		       struct flow_dissector_key_control *key_control,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       __be16 *p_proto, int *p_nhoff, int *p_hlen,
		       unsigned int flags)
{
	struct flow_dissector_key_keyid *key_keyid;
	struct gre_base_hdr *hdr, _hdr;
	int offset = 0;
	u16 gre_ver;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr),
				   data, *p_hlen, &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	/* Only look inside GRE without routing */
	if (hdr->flags & GRE_ROUTING)
		return FLOW_DISSECT_RET_OUT_GOOD;

	/* Only look inside GRE for versions 0 and 1 */
	gre_ver = ntohs(hdr->flags & GRE_VERSION);
	if (gre_ver > 1)
		return FLOW_DISSECT_RET_OUT_GOOD;

	*p_proto = hdr->protocol;
	if (gre_ver) {
		/* Version 1 must be PPTP; the key flag must also be set */
		if (!(*p_proto == GRE_PROTO_PPP && (hdr->flags & GRE_KEY)))
			return FLOW_DISSECT_RET_OUT_GOOD;
	}

	offset += sizeof(struct gre_base_hdr);

	if (hdr->flags & GRE_CSUM)
		offset += sizeof_field(struct gre_full_hdr, csum) +
			  sizeof_field(struct gre_full_hdr, reserved1);

	if (hdr->flags & GRE_KEY) {
		const __be32 *keyid;
		__be32 _keyid;

		keyid = __skb_header_pointer(skb, *p_nhoff + offset,
					     sizeof(_keyid),
					     data, *p_hlen, &_keyid);
		if (!keyid)
			return FLOW_DISSECT_RET_OUT_BAD;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_GRE_KEYID)) {
			key_keyid = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_GRE_KEYID,
							      target_container);
			if (gre_ver == 0)
				key_keyid->keyid = *keyid;
			else
				key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK;
		}
		offset += sizeof_field(struct gre_full_hdr, key);
	}

	if (hdr->flags & GRE_SEQ)
		offset += sizeof_field(struct pptp_gre_header, seq);

	if (gre_ver == 0) {
		if (*p_proto == htons(ETH_P_TEB)) {
			const struct ethhdr *eth;
			struct ethhdr _eth;

			eth = __skb_header_pointer(skb, *p_nhoff + offset,
						   sizeof(_eth),
						   data, *p_hlen, &_eth);
			if (!eth)
				return FLOW_DISSECT_RET_OUT_BAD;
			*p_proto = eth->h_proto;
			offset += sizeof(*eth);

			/* Cap headers that we access via pointers at the
			 * end of the Ethernet header as our maximum alignment
			 * at that point is only 2 bytes.
			 */
			if (NET_IP_ALIGN)
				*p_hlen = *p_nhoff + offset;
		}
	} else { /* version 1, must be PPTP */
		u8 _ppp_hdr[PPP_HDRLEN];
		u8 *ppp_hdr;

		if (hdr->flags & GRE_ACK)
			offset += sizeof_field(struct pptp_gre_header, ack);

		ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset,
					       sizeof(_ppp_hdr),
					       data, *p_hlen, _ppp_hdr);
		if (!ppp_hdr)
			return FLOW_DISSECT_RET_OUT_BAD;

		switch (PPP_PROTOCOL(ppp_hdr)) {
		case PPP_IP:
			*p_proto = htons(ETH_P_IP);
			break;
		case PPP_IPV6:
			*p_proto = htons(ETH_P_IPV6);
			break;
		default:
			/* Could probably catch some more like MPLS */
			break;
		}

		offset += PPP_HDRLEN;
	}

	*p_nhoff += offset;
	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}
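
/* Worked example (illustrative): for a version 0 GRE header with GRE_CSUM
 * and GRE_KEY set, the offset past the header accumulates as
 *
 *	4 (base) + 2 (csum) + 2 (reserved1) + 4 (key) = 12 bytes
 *
 * before the encapsulated protocol, matching the sizeof_field() arithmetic
 * above.
 */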

/**
 * __skb_flow_dissect_batadv() - dissect batman-adv header
 * @skb: sk_buff with the batman-adv header
 * @key_control: flow dissector's control key
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @p_proto: pointer used to update the protocol to process next
 * @p_nhoff: pointer used to update inner network header offset
 * @hlen: packet header length
 * @flags: any combination of FLOW_DISSECTOR_F_*
 *
 * An attempt is made to dissect ETH_P_BATMAN packets. Only
 * &struct batadv_unicast packets are actually processed, because they contain
 * an inner Ethernet header and are usually followed by the actual network
 * header. This allows the flow dissector to continue processing the packet.
 *
 * Return: FLOW_DISSECT_RET_PROTO_AGAIN when &struct batadv_unicast was found,
 *  FLOW_DISSECT_RET_OUT_GOOD when dissector should stop after encapsulation,
 *  otherwise FLOW_DISSECT_RET_OUT_BAD
 */
static enum flow_dissect_ret
__skb_flow_dissect_batadv(const struct sk_buff *skb,
			  struct flow_dissector_key_control *key_control,
			  const void *data, __be16 *p_proto, int *p_nhoff,
			  int hlen, unsigned int flags)
{
	struct {
		struct batadv_unicast_packet batadv_unicast;
		struct ethhdr eth;
	} *hdr, _hdr;

	hdr = __skb_header_pointer(skb, *p_nhoff, sizeof(_hdr), data, hlen,
				   &_hdr);
	if (!hdr)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.version != BATADV_COMPAT_VERSION)
		return FLOW_DISSECT_RET_OUT_BAD;

	if (hdr->batadv_unicast.packet_type != BATADV_UNICAST)
		return FLOW_DISSECT_RET_OUT_BAD;

	*p_proto = hdr->eth.h_proto;
	*p_nhoff += sizeof(*hdr);

	key_control->flags |= FLOW_DIS_ENCAPSULATION;
	if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP)
		return FLOW_DISSECT_RET_OUT_GOOD;

	return FLOW_DISSECT_RET_PROTO_AGAIN;
}

static void
__skb_flow_dissect_tcp(const struct sk_buff *skb,
		       struct flow_dissector *flow_dissector,
		       void *target_container, const void *data,
		       int thoff, int hlen)
{
	struct flow_dissector_key_tcp *key_tcp;
	struct tcphdr *th, _th;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_TCP))
		return;

	th = __skb_header_pointer(skb, thoff, sizeof(_th), data, hlen, &_th);
	if (!th)
		return;

	if (unlikely(__tcp_hdrlen(th) < sizeof(_th)))
		return;

	key_tcp = skb_flow_dissector_target(flow_dissector,
					    FLOW_DISSECTOR_KEY_TCP,
					    target_container);
	key_tcp->flags = (*(__be16 *) &tcp_flag_word(th) & htons(0x0FFF));
}

static void
__skb_flow_dissect_ports(const struct sk_buff *skb,
			 struct flow_dissector *flow_dissector,
			 void *target_container, const void *data,
			 int nhoff, u8 ip_proto, int hlen)
{
	enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX;
	struct flow_dissector_key_ports *key_ports;

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS;
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE;

	if (dissector_ports == FLOW_DISSECTOR_KEY_MAX)
		return;

	key_ports = skb_flow_dissector_target(flow_dissector,
					      dissector_ports,
					      target_container);
	key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto,
						data, hlen);
}

static void
__skb_flow_dissect_ipv4(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct iphdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = iph->tos;
	key_ip->ttl = iph->ttl;
}

static void
__skb_flow_dissect_ipv6(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			const struct ipv6hdr *iph)
{
	struct flow_dissector_key_ip *key_ip;

	if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IP))
		return;

	key_ip = skb_flow_dissector_target(flow_dissector,
					   FLOW_DISSECTOR_KEY_IP,
					   target_container);
	key_ip->tos = ipv6_get_dsfield(iph);
	key_ip->ttl = iph->hop_limit;
}

/* Maximum number of protocol headers that can be parsed in
 * __skb_flow_dissect
 */
#define MAX_FLOW_DISSECT_HDRS	15

static bool skb_flow_dissect_allowed(int *num_hdrs)
{
	++*num_hdrs;

	return (*num_hdrs <= MAX_FLOW_DISSECT_HDRS);
}

static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys,
				     struct flow_dissector *flow_dissector,
				     void *target_container)
{
	struct flow_dissector_key_ports *key_ports = NULL;
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;

	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);
	key_control->thoff = flow_keys->thoff;
	if (flow_keys->is_frag)
		key_control->flags |= FLOW_DIS_IS_FRAGMENT;
	if (flow_keys->is_first_frag)
		key_control->flags |= FLOW_DIS_FIRST_FRAG;
	if (flow_keys->is_encap)
		key_control->flags |= FLOW_DIS_ENCAPSULATION;

	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);
	key_basic->n_proto = flow_keys->n_proto;
	key_basic->ip_proto = flow_keys->ip_proto;

	if (flow_keys->addr_proto == ETH_P_IP &&
	    dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						      target_container);
		key_addrs->v4addrs.src = flow_keys->ipv4_src;
		key_addrs->v4addrs.dst = flow_keys->ipv4_dst;
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_keys->addr_proto == ETH_P_IPV6 &&
		   dissector_uses_key(flow_dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		key_addrs = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						      target_container);
		memcpy(&key_addrs->v6addrs.src, &flow_keys->ipv6_src,
		       sizeof(key_addrs->v6addrs.src));
		memcpy(&key_addrs->v6addrs.dst, &flow_keys->ipv6_dst,
		       sizeof(key_addrs->v6addrs.dst));
		key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS,
						      target_container);
	else if (dissector_uses_key(flow_dissector,
				    FLOW_DISSECTOR_KEY_PORTS_RANGE))
		key_ports = skb_flow_dissector_target(flow_dissector,
						      FLOW_DISSECTOR_KEY_PORTS_RANGE,
						      target_container);

	if (key_ports) {
		key_ports->src = flow_keys->sport;
		key_ports->dst = flow_keys->dport;
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
		key_tags = skb_flow_dissector_target(flow_dissector,
						     FLOW_DISSECTOR_KEY_FLOW_LABEL,
						     target_container);
		key_tags->flow_label = ntohl(flow_keys->flow_label);
	}
}

bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct bpf_flow_keys *flow_keys = ctx->flow_keys;
	u32 result;

	/* Pass parameters to the BPF program */
	memset(flow_keys, 0, sizeof(*flow_keys));
	flow_keys->n_proto = proto;
	flow_keys->nhoff = nhoff;
	flow_keys->thoff = flow_keys->nhoff;

	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG !=
		     (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
	BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP !=
		     (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP);
	flow_keys->flags = flags;

	result = bpf_prog_run_pin_on_cpu(prog, ctx);

	flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, nhoff, hlen);
	flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
				   flow_keys->nhoff, hlen);

	return result == BPF_OK;
}

/**
 * __skb_flow_dissect - extract the flow_keys struct and return it
 * @net: associated network namespace, derived from @skb if NULL
 * @skb: sk_buff to extract the flow from, can be NULL if the rest are specified
 * @flow_dissector: list of keys to dissect
 * @target_container: target structure to put dissected values into
 * @data: raw buffer pointer to the packet, if NULL use skb->data
 * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
 * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
 * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
 * @flags: flags that control the dissection process, e.g.
 *         FLOW_DISSECTOR_F_STOP_AT_ENCAP.
 *
 * The function will try to retrieve individual keys into the target
 * specified by @flow_dissector, from either the skbuff or a raw buffer
 * specified by the remaining parameters.
 *
 * Caller must take care of zeroing the target container memory.
 */
bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags)
{
	struct flow_dissector_key_control *key_control;
	struct flow_dissector_key_basic *key_basic;
	struct flow_dissector_key_addrs *key_addrs;
	struct flow_dissector_key_tags *key_tags;
	struct flow_dissector_key_vlan *key_vlan;
	enum flow_dissect_ret fdret;
	enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
	bool mpls_el = false;
	int mpls_lse = 0;
	int num_hdrs = 0;
	u8 ip_proto = 0;
	bool ret;

	if (!data) {
		data = skb->data;
		proto = skb_vlan_tag_present(skb) ?
			 skb->vlan_proto : skb->protocol;
		nhoff = skb_network_offset(skb);
		hlen = skb_headlen(skb);
#if IS_ENABLED(CONFIG_NET_DSA)
		if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
			     proto == htons(ETH_P_XDSA))) {
			const struct dsa_device_ops *ops;
			int offset = 0;

			ops = skb->dev->dsa_ptr->tag_ops;
			/* Only DSA header taggers break flow dissection */
			if (ops->needed_headroom) {
				if (ops->flow_dissect)
					ops->flow_dissect(skb, &proto, &offset);
				else
					dsa_tag_generic_flow_dissect(skb,
								     &proto,
								     &offset);
				hlen -= offset;
				nhoff += offset;
			}
		}
#endif
	}

	/* It is ensured by skb_flow_dissector_init() that the control key
	 * will always be present.
	 */
	key_control = skb_flow_dissector_target(flow_dissector,
						FLOW_DISSECTOR_KEY_CONTROL,
						target_container);

	/* It is ensured by skb_flow_dissector_init() that the basic key
	 * will always be present.
	 */
	key_basic = skb_flow_dissector_target(flow_dissector,
					      FLOW_DISSECTOR_KEY_BASIC,
					      target_container);

	if (skb) {
		if (!net) {
			if (skb->dev)
				net = dev_net(skb->dev);
			else if (skb->sk)
				net = sock_net(skb->sk);
		}
	}

	WARN_ON_ONCE(!net);
	if (net) {
		enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
		struct bpf_prog_array *run_array;

		rcu_read_lock();
		run_array = rcu_dereference(init_net.bpf.run_array[type]);
		if (!run_array)
			run_array = rcu_dereference(net->bpf.run_array[type]);

		if (run_array) {
			struct bpf_flow_keys flow_keys;
			struct bpf_flow_dissector ctx = {
				.flow_keys = &flow_keys,
				.data = data,
				.data_end = data + hlen,
			};
			__be16 n_proto = proto;
			struct bpf_prog *prog;

			if (skb) {
				ctx.skb = skb;
				/* we can't use 'proto' in the skb case
				 * because it might be set to skb->vlan_proto
				 * which has been pulled from the data
				 */
				n_proto = skb->protocol;
			}

			prog = READ_ONCE(run_array->items[0].prog);
			ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
					       hlen, flags);
			__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
						 target_container);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}

	if (dissector_uses_key(flow_dissector,
			       FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct ethhdr *eth = eth_hdr(skb);
		struct flow_dissector_key_eth_addrs *key_eth_addrs;

		key_eth_addrs = skb_flow_dissector_target(flow_dissector,
							  FLOW_DISSECTOR_KEY_ETH_ADDRS,
							  target_container);
		memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
	}

proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph || iph->ihl < 5) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV4_ADDRS,
							      target_container);

			memcpy(&key_addrs->v4addrs.src, &iph->saddr,
			       sizeof(key_addrs->v4addrs.src));
			memcpy(&key_addrs->v4addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v4addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		}

		__skb_flow_dissect_ipv4(skb, flow_dissector,
					target_container, data, iph);

		if (ip_is_fragment(iph)) {
			key_control->flags |= FLOW_DIS_IS_FRAGMENT;

			if (iph->frag_off & htons(IP_OFFSET)) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			} else {
				key_control->flags |= FLOW_DIS_FIRST_FRAG;
				if (!(flags &
				      FLOW_DISSECTOR_F_PARSE_1ST_FRAG)) {
					fdret = FLOW_DISSECT_RET_OUT_GOOD;
					break;
				}
			}
		}

		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;

		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
		if (!iph) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = iph->nexthdr;
		nhoff += sizeof(struct ipv6hdr);

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_IPV6_ADDRS,
							      target_container);

			memcpy(&key_addrs->v6addrs.src, &iph->saddr,
			       sizeof(key_addrs->v6addrs.src));
			memcpy(&key_addrs->v6addrs.dst, &iph->daddr,
			       sizeof(key_addrs->v6addrs.dst));
			key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		}

		if ((dissector_uses_key(flow_dissector,
					FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
		     (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
		    ip6_flowlabel(iph)) {
			__be32 flow_label = ip6_flowlabel(iph);

			if (dissector_uses_key(flow_dissector,
					       FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
				key_tags = skb_flow_dissector_target(flow_dissector,
								     FLOW_DISSECTOR_KEY_FLOW_LABEL,
								     target_container);
				key_tags->flow_label = ntohl(flow_label);
			}
			if (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) {
				fdret = FLOW_DISSECT_RET_OUT_GOOD;
				break;
			}
		}

		__skb_flow_dissect_ipv6(skb, flow_dissector,
					target_container, data, iph);

		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan = NULL;
		struct vlan_hdr _vlan;
		__be16 saved_vlan_tpid = proto;

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX &&
		    skb && skb_vlan_tag_present(skb)) {
			proto = skb->protocol;
		} else {
			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
						    data, hlen, &_vlan);
			if (!vlan) {
				fdret = FLOW_DISSECT_RET_OUT_BAD;
				break;
			}

			proto = vlan->h_vlan_encapsulated_proto;
			nhoff += sizeof(*vlan);
		}

		if (dissector_vlan == FLOW_DISSECTOR_KEY_MAX) {
			dissector_vlan = FLOW_DISSECTOR_KEY_VLAN;
		} else if (dissector_vlan == FLOW_DISSECTOR_KEY_VLAN) {
			dissector_vlan = FLOW_DISSECTOR_KEY_CVLAN;
		} else {
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		}

		if (dissector_uses_key(flow_dissector, dissector_vlan)) {
			key_vlan = skb_flow_dissector_target(flow_dissector,
							     dissector_vlan,
							     target_container);

			if (!vlan) {
				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
				key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
			} else {
				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
					VLAN_VID_MASK;
				key_vlan->vlan_priority =
					(ntohs(vlan->h_vlan_TCI) &
					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
			}
			key_vlan->vlan_tpid = saved_vlan_tpid;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += PPPOE_SES_HLEN;
		switch (hdr->proto) {
		case htons(PPP_IP):
			proto = htons(ETH_P_IP);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		case htons(PPP_IPV6):
			proto = htons(ETH_P_IPV6);
			fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
			break;
		default:
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}
		break;
	}
	case htons(ETH_P_TIPC): {
		struct tipc_basic_hdr *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr),
					   data, hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		if (dissector_uses_key(flow_dissector,
				       FLOW_DISSECTOR_KEY_TIPC)) {
			key_addrs = skb_flow_dissector_target(flow_dissector,
							      FLOW_DISSECTOR_KEY_TIPC,
							      target_container);
			key_addrs->tipckey.key = tipc_hdr_rps_key(hdr);
			key_control->addr_type = FLOW_DISSECTOR_KEY_TIPC;
		}
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	case htons(ETH_P_MPLS_UC):
	case htons(ETH_P_MPLS_MC):
		fdret = __skb_flow_dissect_mpls(skb, flow_dissector,
						target_container, data,
						nhoff, hlen, mpls_lse,
						&mpls_el);
		nhoff += sizeof(struct mpls_label);
		mpls_lse++;
		break;
	case htons(ETH_P_FCOE):
		if ((hlen - nhoff) < FCOE_HEADER_LEN) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += FCOE_HEADER_LEN;
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;

	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		fdret = __skb_flow_dissect_arp(skb, flow_dissector,
					       target_container, data,
					       nhoff, hlen);
		break;

	case htons(ETH_P_BATMAN):
		fdret = __skb_flow_dissect_batadv(skb, key_control, data,
						  &proto, &nhoff, hlen, flags);
		break;

	case htons(ETH_P_1588): {
		struct ptp_header *hdr, _hdr;

		hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data,
					   hlen, &_hdr);
		if (!hdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		nhoff += ntohs(hdr->message_length);
		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}

	default:
		fdret = FLOW_DISSECT_RET_OUT_BAD;
		break;
	}

	/* Process result of proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_OUT_GOOD:
		goto out_good;
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		goto out_good;
	case FLOW_DISSECT_RET_CONTINUE:
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

ip_proto_again:
	fdret = FLOW_DISSECT_RET_CONTINUE;

	switch (ip_proto) {
	case IPPROTO_GRE:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = __skb_flow_dissect_gre(skb, key_control, flow_dissector,
					       target_container, data,
					       &proto, &nhoff, &hlen, flags);
		break;

	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 _opthdr[2], *opthdr;

		if (proto != htons(ETH_P_IPV6))
			break;

		opthdr = __skb_header_pointer(skb, nhoff, sizeof(_opthdr),
					      data, hlen, &_opthdr);
		if (!opthdr) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		ip_proto = opthdr[0];
		nhoff += (opthdr[1] + 1) << 3;

		fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
		break;
	}
	case NEXTHDR_FRAGMENT: {
		struct frag_hdr _fh, *fh;

		if (proto != htons(ETH_P_IPV6))
			break;

		fh = __skb_header_pointer(skb, nhoff, sizeof(_fh),
					  data, hlen, &_fh);

		if (!fh) {
			fdret = FLOW_DISSECT_RET_OUT_BAD;
			break;
		}

		key_control->flags |= FLOW_DIS_IS_FRAGMENT;

		nhoff += sizeof(_fh);
		ip_proto = fh->nexthdr;

		if (!(fh->frag_off & htons(IP6_OFFSET))) {
			key_control->flags |= FLOW_DIS_FIRST_FRAG;
			if (flags & FLOW_DISSECTOR_F_PARSE_1ST_FRAG) {
				fdret = FLOW_DISSECT_RET_IPPROTO_AGAIN;
				break;
			}
		}

		fdret = FLOW_DISSECT_RET_OUT_GOOD;
		break;
	}
	case IPPROTO_IPIP:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IP);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_IPV6:
		if (flags & FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		proto = htons(ETH_P_IPV6);

		key_control->flags |= FLOW_DIS_ENCAPSULATION;
		if (flags & FLOW_DISSECTOR_F_STOP_AT_ENCAP) {
			fdret = FLOW_DISSECT_RET_OUT_GOOD;
			break;
		}

		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_MPLS:
		proto = htons(ETH_P_MPLS_UC);
		fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
		break;

	case IPPROTO_TCP:
		__skb_flow_dissect_tcp(skb, flow_dissector, target_container,
				       data, nhoff, hlen);
		break;

	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		__skb_flow_dissect_icmp(skb, flow_dissector, target_container,
					data, nhoff, hlen);
		break;

	default:
		break;
	}

	if (!(key_control->flags & FLOW_DIS_IS_FRAGMENT))
		__skb_flow_dissect_ports(skb, flow_dissector, target_container,
					 data, nhoff, ip_proto, hlen);

	/* Process result of IP proto processing */
	switch (fdret) {
	case FLOW_DISSECT_RET_PROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto proto_again;
		break;
	case FLOW_DISSECT_RET_IPPROTO_AGAIN:
		if (skb_flow_dissect_allowed(&num_hdrs))
			goto ip_proto_again;
		break;
	case FLOW_DISSECT_RET_OUT_GOOD:
	case FLOW_DISSECT_RET_CONTINUE:
		break;
	case FLOW_DISSECT_RET_OUT_BAD:
	default:
		goto out_bad;
	}

out_good:
	ret = true;

out:
	key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
	key_basic->n_proto = proto;
	key_basic->ip_proto = ip_proto;

	return ret;

out_bad:
	ret = false;
	goto out;
}
EXPORT_SYMBOL(__skb_flow_dissect);
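
/* Example (illustrative sketch): most callers use the wrappers from
 * <linux/skbuff.h> instead of calling __skb_flow_dissect() directly,
 * e.g.:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect_flow_keys(skb, &keys, 0))
 *		pr_debug("ip_proto %u\n", keys.basic.ip_proto);
 *
 * The wrapper zeroes the target and selects flow_keys_dissector, so the
 * zeroing requirement documented above is already met.
 */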

static siphash_aligned_key_t hashrnd;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static const void *flow_keys_hash_start(const struct flow_keys *flow)
{
	BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
	return &flow->FLOW_KEYS_HASH_START_FIELD;
}

static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
{
	size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);

	BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));

	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		diff -= sizeof(flow->addrs.v4addrs);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		diff -= sizeof(flow->addrs.v6addrs);
		break;
	case FLOW_DISSECTOR_KEY_TIPC:
		diff -= sizeof(flow->addrs.tipckey);
		break;
	}
	return sizeof(*flow) - diff;
}

__be32 flow_get_u32_src(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.src;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.src);
	case FLOW_DISSECTOR_KEY_TIPC:
		return flow->addrs.tipckey.key;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_src);

__be32 flow_get_u32_dst(const struct flow_keys *flow)
{
	switch (flow->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		return flow->addrs.v4addrs.dst;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		return (__force __be32)ipv6_addr_hash(
			&flow->addrs.v6addrs.dst);
	default:
		return 0;
	}
}
EXPORT_SYMBOL(flow_get_u32_dst);

/* Sort the source and destination IP addresses and the ports so that the
 * hash is consistent across both directions of a flow.
 */
static inline void __flow_hash_consistentify(struct flow_keys *keys)
{
	int addr_diff, i;

	switch (keys->control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		addr_diff = (__force u32)keys->addrs.v4addrs.dst -
			    (__force u32)keys->addrs.v4addrs.src;
		if (addr_diff < 0)
			swap(keys->addrs.v4addrs.src, keys->addrs.v4addrs.dst);

		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		addr_diff = memcmp(&keys->addrs.v6addrs.dst,
				   &keys->addrs.v6addrs.src,
				   sizeof(keys->addrs.v6addrs.dst));
		if (addr_diff < 0) {
			for (i = 0; i < 4; i++)
				swap(keys->addrs.v6addrs.src.s6_addr32[i],
				     keys->addrs.v6addrs.dst.s6_addr32[i]);
		}
		if ((__force u16)keys->ports.dst <
		    (__force u16)keys->ports.src) {
			swap(keys->ports.src, keys->ports.dst);
		}
		break;
	}
}

static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
					const siphash_key_t *keyval)
{
	u32 hash;

	__flow_hash_consistentify(keys);

	hash = siphash(flow_keys_hash_start(keys),
		       flow_keys_hash_length(keys), keyval);
	if (!hash)
		hash = 1;

	return hash;
}

u32 flow_hash_from_keys(struct flow_keys *keys)
{
	__flow_hash_secret_init();
	return __flow_hash_from_keys(keys, &hashrnd);
}
EXPORT_SYMBOL(flow_hash_from_keys);
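
/* Example (illustrative sketch): a hash can also be computed from manually
 * filled keys, much like __get_hash_from_flowi6() does further below:
 *
 *	struct flow_keys keys;
 *
 *	memset(&keys, 0, sizeof(keys));
 *	keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *	keys.addrs.v4addrs.src = saddr;
 *	keys.addrs.v4addrs.dst = daddr;
 *	keys.ports.src = sport;
 *	keys.ports.dst = dport;
 *	keys.basic.ip_proto = IPPROTO_TCP;
 *	hash = flow_hash_from_keys(&keys);
 *
 * Thanks to __flow_hash_consistentify(), the reverse direction of the same
 * flow produces the same hash value.
 */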

static inline u32 ___skb_get_hash(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  const siphash_key_t *keyval)
{
	skb_flow_dissect_flow_keys(skb, keys,
				   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(keys, keyval);
}

struct _flow_keys_digest_data {
	__be16	n_proto;
	u8	ip_proto;
	u8	padding;
	__be32	ports;
	__be32	src;
	__be32	dst;
};

void make_flow_keys_digest(struct flow_keys_digest *digest,
			   const struct flow_keys *flow)
{
	struct _flow_keys_digest_data *data =
	    (struct _flow_keys_digest_data *)digest;

	BUILD_BUG_ON(sizeof(*data) > sizeof(*digest));

	memset(digest, 0, sizeof(*digest));

	data->n_proto = flow->basic.n_proto;
	data->ip_proto = flow->basic.ip_proto;
	data->ports = flow->ports.ports;
	data->src = flow->addrs.v4addrs.src;
	data->dst = flow->addrs.v4addrs.dst;
}
EXPORT_SYMBOL(make_flow_keys_digest);

static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;

u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
{
	struct flow_keys keys;

	__flow_hash_secret_init();

	memset(&keys, 0, sizeof(keys));
	__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
			   &keys, NULL, 0, 0, 0,
			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);

	return __flow_hash_from_keys(&keys, &hashrnd);
}
EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);

/**
 * __skb_get_hash - calculate a flow hash
 * @skb: sk_buff to calculate the flow hash from
 *
 * This function calculates a flow hash based on src/dst addresses
 * and src/dst port numbers. On success it sets the hash in @skb to a
 * non-zero value; zero indicates no valid hash. It also sets l4_hash
 * in @skb if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	__flow_hash_secret_init();

	hash = ___skb_get_hash(skb, &keys, &hashrnd);

	__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
}
EXPORT_SYMBOL(__skb_get_hash);
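
/* Example (illustrative): skb_get_hash() in <linux/skbuff.h> is the usual
 * entry point; it returns a cached valid skb->hash and only falls back to
 * __skb_get_hash() to compute a software hash:
 *
 *	u32 h = skb_get_hash(skb);
 */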

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb)
{
	struct flow_keys keys;

	return ___skb_get_hash(skb, &keys, perturb);
}
EXPORT_SYMBOL(skb_get_hash_perturb);

u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
		   const struct flow_keys_basic *keys, int hlen)
{
	u32 poff = keys->control.thoff;

	/* skip L4 headers for fragments after the first */
	if ((keys->control.flags & FLOW_DIS_IS_FRAGMENT) &&
	    !(keys->control.flags & FLOW_DIS_FIRST_FRAG))
		return poff;

	switch (keys->basic.ip_proto) {
	case IPPROTO_TCP: {
		/* access doff as u8 to avoid unaligned access */
		const u8 *doff;
		u8 _doff;

		doff = __skb_header_pointer(skb, poff + 12, sizeof(_doff),
					    data, hlen, &_doff);
		if (!doff)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), (*doff & 0xF0) >> 2);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}

/**
 * skb_get_poff - get the offset to the payload
 * @skb: sk_buff to get the payload offset from
 *
 * The function will get the offset to the payload as far as it could be
 * dissected. The main user is currently BPF, which lets us dynamically
 * truncate packets and analyze only the headers, without having to push
 * the actual payload to user space.
 */
u32 skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					      NULL, 0, 0, 0, 0))
		return 0;

	return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
}
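
/* Example (illustrative): a capture path can use this to keep only the
 * headers, truncating each packet at min(skb->len, skb_get_poff(skb))
 * before handing it to user space.
 */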

__u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
{
	memset(keys, 0, sizeof(*keys));

	memcpy(&keys->addrs.v6addrs.src, &fl6->saddr,
	    sizeof(keys->addrs.v6addrs.src));
	memcpy(&keys->addrs.v6addrs.dst, &fl6->daddr,
	    sizeof(keys->addrs.v6addrs.dst));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->ports.src = fl6->fl6_sport;
	keys->ports.dst = fl6->fl6_dport;
	keys->keyid.keyid = fl6->fl6_gre_key;
	keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
	keys->basic.ip_proto = fl6->flowi6_proto;

	return flow_hash_from_keys(keys);
}
EXPORT_SYMBOL(__get_hash_from_flowi6);

static const struct flow_dissector_key flow_keys_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
};

static const struct flow_dissector_key flow_keys_basic_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
};

struct flow_dissector flow_keys_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_dissector);

struct flow_dissector flow_keys_basic_dissector __read_mostly;
EXPORT_SYMBOL(flow_keys_basic_dissector);

static int __init init_default_flow_dissectors(void)
{
	skb_flow_dissector_init(&flow_keys_dissector,
				flow_keys_dissector_keys,
				ARRAY_SIZE(flow_keys_dissector_keys));
	skb_flow_dissector_init(&flow_keys_dissector_symmetric,
				flow_keys_dissector_symmetric_keys,
				ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
	skb_flow_dissector_init(&flow_keys_basic_dissector,
				flow_keys_basic_dissector_keys,
				ARRAY_SIZE(flow_keys_basic_dissector_keys));
	return 0;
}
core_initcall(init_default_flow_dissectors);