// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

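/* __TCA_FLOWER_KEY_CT_FLAGS_MAX is one past the highest defined flag
 * bit, so the shift below yields the first undefined bit, and
 * subtracting one from that produces a mask covering every defined
 * conntrack state flag.  The mask feeds the netlink policy checks for
 * the CT state attributes further down.
 */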
#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

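/* Shrink the mask to the smallest byte range that still covers every
 * nonzero mask byte, rounding the bounds out to sizeof(long) so that
 * masked comparisons can be done a long at a time.  Hash lookups only
 * need to consider bytes within [range.start, range.end) afterwards.
 */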
static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

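/* Compute mkey = key & mask over the mask's byte range.  fl_flow_key is
 * __aligned(BITS_PER_LONG / 8) and the range bounds are rounded to
 * sizeof(long), so the long-sized accesses below are always aligned.
 */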
static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

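/* A mask fits a template if it does not enable any bit that the
 * template mask leaves unset, i.e. (~tmplt & mask) == 0 for every long
 * in the mask range.  A NULL template fits any mask.
 */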
static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

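/* Port ranges cannot be expressed as a plain bit mask, so range filters
 * are compared linearly: the skb's port must fall within [min, max],
 * and on a hit the filter's own min/max values are copied into the
 * masked key so that the subsequent hash lookup matches exactly.
 */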
static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

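/* Map conntrack packet info states onto the flower CT flags exposed
 * through the TCA_FLOWER_KEY_CT_STATE attribute.
 */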
static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

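/* Classify an skb by dissecting only the fields that some mask cares
 * about and doing one hash lookup per mask, in list order.  As a rough
 * illustration, a rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * installs a single mask covering (roughly) basic.n_proto,
 * basic.ip_proto and tp.dst, and every filter sharing that mask lives
 * in the same hash table.
 */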
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

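/* Drop one reference to a mask.  The final put unlinks the mask from
 * the mask hashtable and list and defers the actual free past an RCU
 * grace period via tcf_queue_work(), since concurrent readers may
 * still be walking it.  Returns true if this was the last reference.
 */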
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

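/* Offload a filter to hardware.  For skip_sw filters any offload
 * failure is fatal; otherwise errors are ignored and the filter runs
 * in software only.  A skip_sw filter that no driver accepted (so
 * TCA_CLS_FLAGS_IN_HW never got set) is rejected with -EINVAL.
 */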
static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
				 cls_flower.stats.pkts,
				 cls_flower.stats.drops,
				 cls_flower.stats.lastused,
				 cls_flower.stats.used_hw_stats,
				 cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

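/* Copy a key value and its mask from netlink attributes.  An absent
 * mask attribute means an exact match, so the mask defaults to all
 * ones; an absent value attribute leaves both untouched.
 */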
static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

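/* Parse the min/max port range attributes and reject ranges where the
 * minimum is not strictly below the maximum.
 */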
static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}

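/* Parse the MPLS match attributes.  The legacy single-LSE attributes
 * (TTL, BOS, TC, LABEL) and the nested TCA_FLOWER_KEY_MPLS_OPTS
 * stack-entry format are mutually exclusive; supplying both is
 * rejected with -EBADMSG.
 */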
static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

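/* Parse one geneve option into either the key or the mask.  The option
 * bytes are prefilled with all-ones so that a missing mask (depth == 0)
 * implies an exact match.  Returns the number of bytes consumed in
 * enc_opts.data, or a negative errno.
 */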
static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}

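/* Parse tunnel encapsulation options.  Key and mask attributes are
 * walked in lockstep: each key option must be matched by a mask option
 * of the same type and length, and mixing option types (geneve, vxlan,
 * erspan, gtp) within one filter is rejected.
 */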
static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask misaligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

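/* Reject inconsistent conntrack state combinations: any flag requires
 * +trk, +new excludes +est and +rpl, and +inv may only be combined
 * with +trk.
 */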
static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO,
					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					&key->vlan, &mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
1711 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1712 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1713 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1714 		if (ret)
1715 			return ret;
1716 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1717 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1718 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1719 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1720 			       sizeof(key->arp.sip));
1721 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1722 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1723 			       sizeof(key->arp.tip));
1724 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1725 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1726 			       sizeof(key->arp.op));
1727 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1728 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1729 			       sizeof(key->arp.sha));
1730 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1731 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1732 			       sizeof(key->arp.tha));
1733 	}
1734 
1735 	if (key->basic.ip_proto == IPPROTO_TCP ||
1736 	    key->basic.ip_proto == IPPROTO_UDP ||
1737 	    key->basic.ip_proto == IPPROTO_SCTP) {
1738 		ret = fl_set_key_port_range(tb, key, mask, extack);
1739 		if (ret)
1740 			return ret;
1741 	}
1742 
1743 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1744 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1745 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1746 		mask->enc_control.addr_type = ~0;
1747 		fl_set_key_val(tb, &key->enc_ipv4.src,
1748 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1749 			       &mask->enc_ipv4.src,
1750 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1751 			       sizeof(key->enc_ipv4.src));
1752 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1753 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1754 			       &mask->enc_ipv4.dst,
1755 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1756 			       sizeof(key->enc_ipv4.dst));
1757 	}
1758 
1759 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1760 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1761 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1762 		mask->enc_control.addr_type = ~0;
1763 		fl_set_key_val(tb, &key->enc_ipv6.src,
1764 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1765 			       &mask->enc_ipv6.src,
1766 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1767 			       sizeof(key->enc_ipv6.src));
1768 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1769 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1770 			       &mask->enc_ipv6.dst,
1771 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1772 			       sizeof(key->enc_ipv6.dst));
1773 	}
1774 
1775 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1776 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1777 		       sizeof(key->enc_key_id.keyid));
1778 
1779 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1780 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1781 		       sizeof(key->enc_tp.src));
1782 
1783 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1784 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1785 		       sizeof(key->enc_tp.dst));
1786 
1787 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1788 
1789 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1790 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1791 		       sizeof(key->hash.hash));
1792 
1793 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1794 		ret = fl_set_enc_opt(tb, key, mask, extack);
1795 		if (ret)
1796 			return ret;
1797 	}
1798 
1799 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1800 	if (ret)
1801 		return ret;
1802 
1803 	if (tb[TCA_FLOWER_KEY_FLAGS])
1804 		ret = fl_set_key_flags(tb, &key->control.flags,
1805 				       &mask->control.flags, extack);
1806 
1807 	return ret;
1808 }
1809 
1810 static void fl_mask_copy(struct fl_flow_mask *dst,
1811 			 struct fl_flow_mask *src)
1812 {
1813 	const void *psrc = fl_key_get_start(&src->key, src);
1814 	void *pdst = fl_key_get_start(&dst->key, src);
1815 
1816 	memcpy(pdst, psrc, fl_mask_range(src));
1817 	dst->range = src->range;
1818 }
1819 
1820 static const struct rhashtable_params fl_ht_params = {
1821 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1822 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1823 	.automatic_shrinking = true,
1824 };
1825 
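/* Hash filters only over the bytes their mask actually covers: narrow
 * key_len and key_offset from the full mkey down to [range.start, range.end).
 */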
1826 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1827 {
1828 	mask->filter_ht_params = fl_ht_params;
1829 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1830 	mask->filter_ht_params.key_offset += mask->range.start;
1831 
1832 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1833 }
1834 
1835 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1836 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1837 
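/* Non-NULL iff any byte of @member is set in @mask, i.e. the field is
 * actually matched on (memchr_inv() finds the first non-zero byte).
 */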
1838 #define FL_KEY_IS_MASKED(mask, member)						\
1839 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1840 		   0, FL_KEY_MEMBER_SIZE(member))
1841 
1842 #define FL_KEY_SET(keys, cnt, id, member)					\
1843 	do {									\
1844 		keys[cnt].key_id = id;						\
1845 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1846 		cnt++;								\
1847 	} while (0)
1848 
1849 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1850 	do {									\
1851 		if (FL_KEY_IS_MASKED(mask, member))				\
1852 			FL_KEY_SET(keys, cnt, id, member);			\
1853 	} while (0)
1854 
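/* Build the per-mask dissector: the control and basic keys are always
 * registered, enc_control is registered alongside the tunnel address keys,
 * and every other key only when its portion of the mask is non-zero.
 */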
1855 static void fl_init_dissector(struct flow_dissector *dissector,
1856 			      struct fl_flow_key *mask)
1857 {
1858 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1859 	size_t cnt = 0;
1860 
1861 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1862 			     FLOW_DISSECTOR_KEY_META, meta);
1863 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1864 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1865 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1866 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1867 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1868 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1869 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1870 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1871 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1872 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1873 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1874 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1875 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1876 			     FLOW_DISSECTOR_KEY_IP, ip);
1877 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1878 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1879 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1880 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1881 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1882 			     FLOW_DISSECTOR_KEY_ARP, arp);
1883 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1884 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1885 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1886 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1887 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1888 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1889 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1890 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1891 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1892 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1893 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1894 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1895 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1896 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1897 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1898 			   enc_control);
1899 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1900 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1901 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1902 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1903 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1904 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1905 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1906 			     FLOW_DISSECTOR_KEY_CT, ct);
1907 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1908 			     FLOW_DISSECTOR_KEY_HASH, hash);
1909 
1910 	skb_flow_dissector_init(dissector, keys, cnt);
1911 }
1912 
1913 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1914 					       struct fl_flow_mask *mask)
1915 {
1916 	struct fl_flow_mask *newmask;
1917 	int err;
1918 
1919 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1920 	if (!newmask)
1921 		return ERR_PTR(-ENOMEM);
1922 
1923 	fl_mask_copy(newmask, mask);
1924 
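	/* A mask covering both the min and max of either port field denotes a
	 * port-range match; flag it so lookups use the range comparison path.
	 */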
1925 	if ((newmask->key.tp_range.tp_min.dst &&
1926 	     newmask->key.tp_range.tp_max.dst) ||
1927 	    (newmask->key.tp_range.tp_min.src &&
1928 	     newmask->key.tp_range.tp_max.src))
1929 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1930 
1931 	err = fl_init_mask_hashtable(newmask);
1932 	if (err)
1933 		goto errout_free;
1934 
1935 	fl_init_dissector(&newmask->dissector, &newmask->key);
1936 
1937 	INIT_LIST_HEAD_RCU(&newmask->filters);
1938 
1939 	refcount_set(&newmask->refcnt, 1);
1940 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1941 				      &newmask->ht_node, mask_ht_params);
1942 	if (err)
1943 		goto errout_destroy;
1944 
1945 	spin_lock(&head->masks_lock);
1946 	list_add_tail_rcu(&newmask->list, &head->masks);
1947 	spin_unlock(&head->masks_lock);
1948 
1949 	return newmask;
1950 
1951 errout_destroy:
1952 	rhashtable_destroy(&newmask->ht);
1953 errout_free:
1954 	kfree(newmask);
1955 
1956 	return ERR_PTR(err);
1957 }
1958 
1959 static int fl_check_assign_mask(struct cls_fl_head *head,
1960 				struct cls_fl_filter *fnew,
1961 				struct cls_fl_filter *fold,
1962 				struct fl_flow_mask *mask)
1963 {
1964 	struct fl_flow_mask *newmask;
1965 	int ret = 0;
1966 
1967 	rcu_read_lock();
1968 
1969 	/* Insert the mask as a temporary node to prevent concurrent creation
1970 	 * of a mask with the same key. Any concurrent lookup with the same
1971 	 * key will return -EAGAIN because the mask's refcnt is zero.
1972 	 */
1973 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1974 						       &mask->ht_node,
1975 						       mask_ht_params);
1976 	if (!fnew->mask) {
1977 		rcu_read_unlock();
1978 
1979 		if (fold) {
1980 			ret = -EINVAL;
1981 			goto errout_cleanup;
1982 		}
1983 
1984 		newmask = fl_create_new_mask(head, mask);
1985 		if (IS_ERR(newmask)) {
1986 			ret = PTR_ERR(newmask);
1987 			goto errout_cleanup;
1988 		}
1989 
1990 		fnew->mask = newmask;
1991 		return 0;
1992 	} else if (IS_ERR(fnew->mask)) {
1993 		ret = PTR_ERR(fnew->mask);
1994 	} else if (fold && fold->mask != fnew->mask) {
1995 		ret = -EINVAL;
1996 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1997 		/* Mask was deleted concurrently, try again */
1998 		ret = -EAGAIN;
1999 	}
2000 	rcu_read_unlock();
2001 	return ret;
2002 
2003 errout_cleanup:
2004 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2005 			       mask_ht_params);
2006 	return ret;
2007 }
2008 
2009 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2010 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2011 			unsigned long base, struct nlattr **tb,
2012 			struct nlattr *est,
2013 			struct fl_flow_tmplt *tmplt,
2014 			u32 flags, u32 fl_flags,
2015 			struct netlink_ext_ack *extack)
2016 {
2017 	int err;
2018 
2019 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2020 				   fl_flags, extack);
2021 	if (err < 0)
2022 		return err;
2023 
2024 	if (tb[TCA_FLOWER_CLASSID]) {
2025 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2026 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2027 			rtnl_lock();
2028 		tcf_bind_filter(tp, &f->res, base);
2029 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2030 			rtnl_unlock();
2031 	}
2032 
2033 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2034 	if (err)
2035 		return err;
2036 
2037 	fl_mask_update_range(mask);
2038 	fl_set_masked_key(&f->mkey, &f->key, mask);
2039 
2040 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2041 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2042 		return -EINVAL;
2043 	}
2044 
2045 	return 0;
2046 }
2047 
2048 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2049 			       struct cls_fl_filter *fold,
2050 			       bool *in_ht)
2051 {
2052 	struct fl_flow_mask *mask = fnew->mask;
2053 	int err;
2054 
2055 	err = rhashtable_lookup_insert_fast(&mask->ht,
2056 					    &fnew->ht_node,
2057 					    mask->filter_ht_params);
2058 	if (err) {
2059 		*in_ht = false;
2060 		/* It is okay if a filter with the same key exists when
2061 		 * overwriting.
2062 		 */
2063 		return fold && err == -EEXIST ? 0 : err;
2064 	}
2065 
2066 	*in_ht = true;
2067 	return 0;
2068 }
2069 
2070 static int fl_change(struct net *net, struct sk_buff *in_skb,
2071 		     struct tcf_proto *tp, unsigned long base,
2072 		     u32 handle, struct nlattr **tca,
2073 		     void **arg, u32 flags,
2074 		     struct netlink_ext_ack *extack)
2075 {
2076 	struct cls_fl_head *head = fl_head_dereference(tp);
2077 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2078 	struct cls_fl_filter *fold = *arg;
2079 	struct cls_fl_filter *fnew;
2080 	struct fl_flow_mask *mask;
2081 	struct nlattr **tb;
2082 	bool in_ht;
2083 	int err;
2084 
2085 	if (!tca[TCA_OPTIONS]) {
2086 		err = -EINVAL;
2087 		goto errout_fold;
2088 	}
2089 
2090 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2091 	if (!mask) {
2092 		err = -ENOBUFS;
2093 		goto errout_fold;
2094 	}
2095 
2096 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2097 	if (!tb) {
2098 		err = -ENOBUFS;
2099 		goto errout_mask_alloc;
2100 	}
2101 
2102 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2103 					  tca[TCA_OPTIONS], fl_policy, NULL);
2104 	if (err < 0)
2105 		goto errout_tb;
2106 
2107 	if (fold && handle && fold->handle != handle) {
2108 		err = -EINVAL;
2109 		goto errout_tb;
2110 	}
2111 
2112 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2113 	if (!fnew) {
2114 		err = -ENOBUFS;
2115 		goto errout_tb;
2116 	}
2117 	INIT_LIST_HEAD(&fnew->hw_list);
2118 	refcount_set(&fnew->refcnt, 1);
2119 
2120 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2121 	if (err < 0)
2122 		goto errout;
2123 
2124 	if (tb[TCA_FLOWER_FLAGS]) {
2125 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2126 
2127 		if (!tc_flags_valid(fnew->flags)) {
2128 			err = -EINVAL;
2129 			goto errout;
2130 		}
2131 	}
2132 
2133 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2134 			   tp->chain->tmplt_priv, flags, fnew->flags,
2135 			   extack);
2136 	if (err)
2137 		goto errout;
2138 
2139 	err = fl_check_assign_mask(head, fnew, fold, mask);
2140 	if (err)
2141 		goto errout;
2142 
2143 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2144 	if (err)
2145 		goto errout_mask;
2146 
2147 	if (!tc_skip_hw(fnew->flags)) {
2148 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2149 		if (err)
2150 			goto errout_ht;
2151 	}
2152 
2153 	if (!tc_in_hw(fnew->flags))
2154 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2155 
2156 	spin_lock(&tp->lock);
2157 
2158 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2159 	 * up the proto again or create a new one, if necessary.
2160 	 */
2161 	if (tp->deleting) {
2162 		err = -EAGAIN;
2163 		goto errout_hw;
2164 	}
2165 
2166 	if (fold) {
2167 		/* Fold filter was deleted concurrently. Retry lookup. */
2168 		if (fold->deleted) {
2169 			err = -EAGAIN;
2170 			goto errout_hw;
2171 		}
2172 
2173 		fnew->handle = handle;
2174 
2175 		if (!in_ht) {
2176 			struct rhashtable_params params =
2177 				fnew->mask->filter_ht_params;
2178 
2179 			err = rhashtable_insert_fast(&fnew->mask->ht,
2180 						     &fnew->ht_node,
2181 						     params);
2182 			if (err)
2183 				goto errout_hw;
2184 			in_ht = true;
2185 		}
2186 
2187 		refcount_inc(&fnew->refcnt);
2188 		rhashtable_remove_fast(&fold->mask->ht,
2189 				       &fold->ht_node,
2190 				       fold->mask->filter_ht_params);
2191 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2192 		list_replace_rcu(&fold->list, &fnew->list);
2193 		fold->deleted = true;
2194 
2195 		spin_unlock(&tp->lock);
2196 
2197 		fl_mask_put(head, fold->mask);
2198 		if (!tc_skip_hw(fold->flags))
2199 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2200 		tcf_unbind_filter(tp, &fold->res);
2201 		/* The caller holds a reference to fold, so its refcnt stays
2202 		 * above zero after this.
2203 		 */
2204 		refcount_dec(&fold->refcnt);
2205 		__fl_put(fold);
2206 	} else {
2207 		if (handle) {
2208 			/* user specified a handle that does not exist yet */
2209 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2210 					    handle, GFP_ATOMIC);
2211 
2212 			/* A filter with the specified handle was concurrently
2213 			 * inserted after the initial check in cls_api. This is
2214 			 * not necessarily an error if NLM_F_EXCL is not set in
2215 			 * the message flags. Returning EAGAIN will cause cls_api
2216 			 * to try to update the concurrently inserted rule.
2217 			 */
2218 			if (err == -ENOSPC)
2219 				err = -EAGAIN;
2220 		} else {
2221 			handle = 1;
2222 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2223 					    INT_MAX, GFP_ATOMIC);
2224 		}
2225 		if (err)
2226 			goto errout_hw;
2227 
2228 		refcount_inc(&fnew->refcnt);
2229 		fnew->handle = handle;
2230 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2231 		spin_unlock(&tp->lock);
2232 	}
2233 
2234 	*arg = fnew;
2235 
2236 	kfree(tb);
2237 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2238 	return 0;
2239 
2240 errout_ht:
2241 	spin_lock(&tp->lock);
2242 errout_hw:
2243 	fnew->deleted = true;
2244 	spin_unlock(&tp->lock);
2245 	if (!tc_skip_hw(fnew->flags))
2246 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2247 	if (in_ht)
2248 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2249 				       fnew->mask->filter_ht_params);
2250 errout_mask:
2251 	fl_mask_put(head, fnew->mask);
2252 errout:
2253 	__fl_put(fnew);
2254 errout_tb:
2255 	kfree(tb);
2256 errout_mask_alloc:
2257 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2258 errout_fold:
2259 	if (fold)
2260 		__fl_put(fold);
2261 	return err;
2262 }
2263 
2264 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2265 		     bool rtnl_held, struct netlink_ext_ack *extack)
2266 {
2267 	struct cls_fl_head *head = fl_head_dereference(tp);
2268 	struct cls_fl_filter *f = arg;
2269 	bool last_on_mask;
2270 	int err = 0;
2271 
2272 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2273 	*last = list_empty(&head->masks);
2274 	__fl_put(f);
2275 
2276 	return err;
2277 }
2278 
2279 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2280 		    bool rtnl_held)
2281 {
2282 	struct cls_fl_head *head = fl_head_dereference(tp);
2283 	unsigned long id = arg->cookie, tmp;
2284 	struct cls_fl_filter *f;
2285 
2286 	arg->count = arg->skip;
2287 
2288 	rcu_read_lock();
2289 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2290 		/* don't return filters that are being deleted */
2291 		if (!refcount_inc_not_zero(&f->refcnt))
2292 			continue;
2293 		rcu_read_unlock();
2294 
2295 		if (arg->fn(tp, f, arg) < 0) {
2296 			__fl_put(f);
2297 			arg->stop = 1;
2298 			rcu_read_lock();
2299 			break;
2300 		}
2301 		__fl_put(f);
2302 		arg->count++;
2303 		rcu_read_lock();
2304 	}
2305 	rcu_read_unlock();
2306 	arg->cookie = id;
2307 }
2308 
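/* Return the next hw_filters entry after @f (or the first entry when @f is
 * NULL) that a reference can still be taken on; when adding offloads,
 * filters already marked deleted are skipped.
 */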
2309 static struct cls_fl_filter *
2310 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2311 {
2312 	struct cls_fl_head *head = fl_head_dereference(tp);
2313 
2314 	spin_lock(&tp->lock);
2315 	if (list_empty(&head->hw_filters)) {
2316 		spin_unlock(&tp->lock);
2317 		return NULL;
2318 	}
2319 
2320 	if (!f)
2321 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2322 			       hw_list);
2323 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2324 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2325 			spin_unlock(&tp->lock);
2326 			return f;
2327 		}
2328 	}
2329 
2330 	spin_unlock(&tp->lock);
2331 	return NULL;
2332 }
2333 
2334 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2335 			void *cb_priv, struct netlink_ext_ack *extack)
2336 {
2337 	struct tcf_block *block = tp->chain->block;
2338 	struct flow_cls_offload cls_flower = {};
2339 	struct cls_fl_filter *f = NULL;
2340 	int err;
2341 
2342 	/* The hw_filters list can only be changed by hw offload functions
2343 	 * after obtaining the rtnl lock. Make sure it is not changed while
2344 	 * reoffload is iterating it.
2345 	 */
2346 	ASSERT_RTNL();
2347 
2348 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2349 		cls_flower.rule =
2350 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2351 		if (!cls_flower.rule) {
2352 			__fl_put(f);
2353 			return -ENOMEM;
2354 		}
2355 
2356 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2357 					   extack);
2358 		cls_flower.command = add ?
2359 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2360 		cls_flower.cookie = (unsigned long)f;
2361 		cls_flower.rule->match.dissector = &f->mask->dissector;
2362 		cls_flower.rule->match.mask = &f->mask->key;
2363 		cls_flower.rule->match.key = &f->mkey;
2364 
2365 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
2366 		if (err) {
2367 			kfree(cls_flower.rule);
2368 			if (tc_skip_sw(f->flags)) {
2369 				NL_SET_ERR_MSG_MOD(extack, "Failed to set up flow action");
2370 				__fl_put(f);
2371 				return err;
2372 			}
2373 			goto next_flow;
2374 		}
2375 
2376 		cls_flower.classid = f->res.classid;
2377 
2378 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2379 					    TC_SETUP_CLSFLOWER, &cls_flower,
2380 					    cb_priv, &f->flags,
2381 					    &f->in_hw_count);
2382 		tc_cleanup_offload_action(&cls_flower.rule->action);
2383 		kfree(cls_flower.rule);
2384 
2385 		if (err) {
2386 			__fl_put(f);
2387 			return err;
2388 		}
2389 next_flow:
2390 		__fl_put(f);
2391 	}
2392 
2393 	return 0;
2394 }
2395 
2396 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2397 {
2398 	struct flow_cls_offload *cls_flower = type_data;
2399 	struct cls_fl_filter *f =
2400 		(struct cls_fl_filter *) cls_flower->cookie;
2401 	struct cls_fl_head *head = fl_head_dereference(tp);
2402 
2403 	spin_lock(&tp->lock);
2404 	list_add(&f->hw_list, &head->hw_filters);
2405 	spin_unlock(&tp->lock);
2406 }
2407 
2408 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2409 {
2410 	struct flow_cls_offload *cls_flower = type_data;
2411 	struct cls_fl_filter *f =
2412 		(struct cls_fl_filter *) cls_flower->cookie;
2413 
2414 	spin_lock(&tp->lock);
2415 	if (!list_empty(&f->hw_list))
2416 		list_del_init(&f->hw_list);
2417 	spin_unlock(&tp->lock);
2418 }
2419 
2420 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2421 			      struct fl_flow_tmplt *tmplt)
2422 {
2423 	struct flow_cls_offload cls_flower = {};
2424 	struct tcf_block *block = chain->block;
2425 
2426 	cls_flower.rule = flow_rule_alloc(0);
2427 	if (!cls_flower.rule)
2428 		return -ENOMEM;
2429 
2430 	cls_flower.common.chain_index = chain->index;
2431 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2432 	cls_flower.cookie = (unsigned long) tmplt;
2433 	cls_flower.rule->match.dissector = &tmplt->dissector;
2434 	cls_flower.rule->match.mask = &tmplt->mask;
2435 	cls_flower.rule->match.key = &tmplt->dummy_key;
2436 
2437 	/* We don't care if any of the drivers fail to handle this call;
2438 	 * it serves just as a hint for them.
2439 	 */
2440 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2441 	kfree(cls_flower.rule);
2442 
2443 	return 0;
2444 }
2445 
2446 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2447 				struct fl_flow_tmplt *tmplt)
2448 {
2449 	struct flow_cls_offload cls_flower = {};
2450 	struct tcf_block *block = chain->block;
2451 
2452 	cls_flower.common.chain_index = chain->index;
2453 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2454 	cls_flower.cookie = (unsigned long) tmplt;
2455 
2456 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2457 }
2458 
2459 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2460 			     struct nlattr **tca,
2461 			     struct netlink_ext_ack *extack)
2462 {
2463 	struct fl_flow_tmplt *tmplt;
2464 	struct nlattr **tb;
2465 	int err;
2466 
2467 	if (!tca[TCA_OPTIONS])
2468 		return ERR_PTR(-EINVAL);
2469 
2470 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2471 	if (!tb)
2472 		return ERR_PTR(-ENOBUFS);
2473 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2474 					  tca[TCA_OPTIONS], fl_policy, NULL);
2475 	if (err)
2476 		goto errout_tb;
2477 
2478 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2479 	if (!tmplt) {
2480 		err = -ENOMEM;
2481 		goto errout_tb;
2482 	}
2483 	tmplt->chain = chain;
2484 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2485 	if (err)
2486 		goto errout_tmplt;
2487 
2488 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2489 
2490 	err = fl_hw_create_tmplt(chain, tmplt);
2491 	if (err)
2492 		goto errout_tmplt;
2493 
2494 	kfree(tb);
2495 	return tmplt;
2496 
2497 errout_tmplt:
2498 	kfree(tmplt);
2499 errout_tb:
2500 	kfree(tb);
2501 	return ERR_PTR(err);
2502 }
2503 
2504 static void fl_tmplt_destroy(void *tmplt_priv)
2505 {
2506 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2507 
2508 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2509 	kfree(tmplt);
2510 }
2511 
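/* Emit a key attribute, plus its mask attribute when one is defined, only
 * if the mask has at least one bit set; an all-zero mask means the field
 * was never specified.
 */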
2512 static int fl_dump_key_val(struct sk_buff *skb,
2513 			   void *val, int val_type,
2514 			   void *mask, int mask_type, int len)
2515 {
2516 	int err;
2517 
2518 	if (!memchr_inv(mask, 0, len))
2519 		return 0;
2520 	err = nla_put(skb, val_type, len, val);
2521 	if (err)
2522 		return err;
2523 	if (mask_type != TCA_FLOWER_UNSPEC) {
2524 		err = nla_put(skb, mask_type, len, mask);
2525 		if (err)
2526 			return err;
2527 	}
2528 	return 0;
2529 }
2530 
2531 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2532 				  struct fl_flow_key *mask)
2533 {
2534 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2535 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2536 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2537 			    sizeof(key->tp_range.tp_min.dst)) ||
2538 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2539 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2540 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2541 			    sizeof(key->tp_range.tp_max.dst)) ||
2542 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2543 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2544 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2545 			    sizeof(key->tp_range.tp_min.src)) ||
2546 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2547 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2548 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2549 			    sizeof(key->tp_range.tp_max.src)))
2550 		return -1;
2551 
2552 	return 0;
2553 }
2554 
2555 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2556 				    struct flow_dissector_key_mpls *mpls_key,
2557 				    struct flow_dissector_key_mpls *mpls_mask,
2558 				    u8 lse_index)
2559 {
2560 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2561 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2562 	int err;
2563 
2564 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2565 			 lse_index + 1);
2566 	if (err)
2567 		return err;
2568 
2569 	if (lse_mask->mpls_ttl) {
2570 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2571 				 lse_key->mpls_ttl);
2572 		if (err)
2573 			return err;
2574 	}
2575 	if (lse_mask->mpls_bos) {
2576 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2577 				 lse_key->mpls_bos);
2578 		if (err)
2579 			return err;
2580 	}
2581 	if (lse_mask->mpls_tc) {
2582 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2583 				 lse_key->mpls_tc);
2584 		if (err)
2585 			return err;
2586 	}
2587 	if (lse_mask->mpls_label) {
2588 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2589 				  lse_key->mpls_label);
2590 		if (err)
2591 			return err;
2592 	}
2593 
2594 	return 0;
2595 }
2596 
2597 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2598 				 struct flow_dissector_key_mpls *mpls_key,
2599 				 struct flow_dissector_key_mpls *mpls_mask)
2600 {
2601 	struct nlattr *opts;
2602 	struct nlattr *lse;
2603 	u8 lse_index;
2604 	int err;
2605 
2606 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2607 	if (!opts)
2608 		return -EMSGSIZE;
2609 
2610 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2611 		if (!(mpls_mask->used_lses & 1 << lse_index))
2612 			continue;
2613 
2614 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2615 		if (!lse) {
2616 			err = -EMSGSIZE;
2617 			goto err_opts;
2618 		}
2619 
2620 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2621 					       lse_index);
2622 		if (err)
2623 			goto err_opts_lse;
2624 		nla_nest_end(skb, lse);
2625 	}
2626 	nla_nest_end(skb, opts);
2627 
2628 	return 0;
2629 
2630 err_opts_lse:
2631 	nla_nest_cancel(skb, lse);
2632 err_opts:
2633 	nla_nest_cancel(skb, opts);
2634 
2635 	return err;
2636 }
2637 
2638 static int fl_dump_key_mpls(struct sk_buff *skb,
2639 			    struct flow_dissector_key_mpls *mpls_key,
2640 			    struct flow_dissector_key_mpls *mpls_mask)
2641 {
2642 	struct flow_dissector_mpls_lse *lse_mask;
2643 	struct flow_dissector_mpls_lse *lse_key;
2644 	int err;
2645 
2646 	if (!mpls_mask->used_lses)
2647 		return 0;
2648 
2649 	lse_mask = &mpls_mask->ls[0];
2650 	lse_key = &mpls_key->ls[0];
2651 
2652 	/* For backward compatibility, don't use the MPLS nested attributes if
2653 	 * the rule can be expressed using the old attributes.
2654 	 */
2655 	if (mpls_mask->used_lses & ~1 ||
2656 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2657 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2658 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2659 
2660 	if (lse_mask->mpls_ttl) {
2661 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2662 				 lse_key->mpls_ttl);
2663 		if (err)
2664 			return err;
2665 	}
2666 	if (lse_mask->mpls_tc) {
2667 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2668 				 lse_key->mpls_tc);
2669 		if (err)
2670 			return err;
2671 	}
2672 	if (lse_mask->mpls_label) {
2673 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2674 				  lse_key->mpls_label);
2675 		if (err)
2676 			return err;
2677 	}
2678 	if (lse_mask->mpls_bos) {
2679 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2680 				 lse_key->mpls_bos);
2681 		if (err)
2682 			return err;
2683 	}
2684 	return 0;
2685 }
2686 
2687 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2688 			  struct flow_dissector_key_ip *key,
2689 			  struct flow_dissector_key_ip *mask)
2690 {
2691 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2692 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2693 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2694 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2695 
2696 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2697 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2698 		return -1;
2699 
2700 	return 0;
2701 }
2702 
2703 static int fl_dump_key_vlan(struct sk_buff *skb,
2704 			    int vlan_id_key, int vlan_prio_key,
2705 			    struct flow_dissector_key_vlan *vlan_key,
2706 			    struct flow_dissector_key_vlan *vlan_mask)
2707 {
2708 	int err;
2709 
2710 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2711 		return 0;
2712 	if (vlan_mask->vlan_id) {
2713 		err = nla_put_u16(skb, vlan_id_key,
2714 				  vlan_key->vlan_id);
2715 		if (err)
2716 			return err;
2717 	}
2718 	if (vlan_mask->vlan_priority) {
2719 		err = nla_put_u8(skb, vlan_prio_key,
2720 				 vlan_key->vlan_priority);
2721 		if (err)
2722 			return err;
2723 	}
2724 	return 0;
2725 }
2726 
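/* Copy one flag bit from the flow dissector encoding into the flower
 * netlink encoding, for both the key and the mask.
 */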
2727 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2728 			    u32 *flower_key, u32 *flower_mask,
2729 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2730 {
2731 	if (dissector_mask & dissector_flag_bit) {
2732 		*flower_mask |= flower_flag_bit;
2733 		if (dissector_key & dissector_flag_bit)
2734 			*flower_key |= flower_flag_bit;
2735 	}
2736 }
2737 
2738 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2739 {
2740 	u32 key, mask;
2741 	__be32 _key, _mask;
2742 	int err;
2743 
2744 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2745 		return 0;
2746 
2747 	key = 0;
2748 	mask = 0;
2749 
2750 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2751 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2752 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2753 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2754 			FLOW_DIS_FIRST_FRAG);
2755 
2756 	_key = cpu_to_be32(key);
2757 	_mask = cpu_to_be32(mask);
2758 
2759 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, sizeof(_key), &_key);
2760 	if (err)
2761 		return err;
2762 
2763 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, sizeof(_mask), &_mask);
2764 }
2765 
2766 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2767 				  struct flow_dissector_key_enc_opts *enc_opts)
2768 {
2769 	struct geneve_opt *opt;
2770 	struct nlattr *nest;
2771 	int opt_off = 0;
2772 
2773 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2774 	if (!nest)
2775 		goto nla_put_failure;
2776 
2777 	while (enc_opts->len > opt_off) {
2778 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2779 
2780 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2781 				 opt->opt_class))
2782 			goto nla_put_failure;
2783 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2784 			       opt->type))
2785 			goto nla_put_failure;
2786 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2787 			    opt->length * 4, opt->opt_data))
2788 			goto nla_put_failure;
2789 
2790 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2791 	}
2792 	nla_nest_end(skb, nest);
2793 	return 0;
2794 
2795 nla_put_failure:
2796 	nla_nest_cancel(skb, nest);
2797 	return -EMSGSIZE;
2798 }
2799 
2800 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2801 				 struct flow_dissector_key_enc_opts *enc_opts)
2802 {
2803 	struct vxlan_metadata *md;
2804 	struct nlattr *nest;
2805 
2806 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2807 	if (!nest)
2808 		goto nla_put_failure;
2809 
2810 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2811 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2812 		goto nla_put_failure;
2813 
2814 	nla_nest_end(skb, nest);
2815 	return 0;
2816 
2817 nla_put_failure:
2818 	nla_nest_cancel(skb, nest);
2819 	return -EMSGSIZE;
2820 }
2821 
2822 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2823 				  struct flow_dissector_key_enc_opts *enc_opts)
2824 {
2825 	struct erspan_metadata *md;
2826 	struct nlattr *nest;
2827 
2828 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2829 	if (!nest)
2830 		goto nla_put_failure;
2831 
2832 	md = (struct erspan_metadata *)&enc_opts->data[0];
2833 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2834 		goto nla_put_failure;
2835 
2836 	if (md->version == 1 &&
2837 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2838 		goto nla_put_failure;
2839 
2840 	if (md->version == 2 &&
2841 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2842 			md->u.md2.dir) ||
2843 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2844 			get_hwid(&md->u.md2))))
2845 		goto nla_put_failure;
2846 
2847 	nla_nest_end(skb, nest);
2848 	return 0;
2849 
2850 nla_put_failure:
2851 	nla_nest_cancel(skb, nest);
2852 	return -EMSGSIZE;
2853 }
2854 
2855 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2856 			       struct flow_dissector_key_enc_opts *enc_opts)
2857 
2858 {
2859 	struct gtp_pdu_session_info *session_info;
2860 	struct nlattr *nest;
2861 
2862 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2863 	if (!nest)
2864 		goto nla_put_failure;
2865 
2866 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2867 
2868 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2869 		       session_info->pdu_type))
2870 		goto nla_put_failure;
2871 
2872 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2873 		goto nla_put_failure;
2874 
2875 	nla_nest_end(skb, nest);
2876 	return 0;
2877 
2878 nla_put_failure:
2879 	nla_nest_cancel(skb, nest);
2880 	return -EMSGSIZE;
2881 }
2882 
2883 static int fl_dump_key_ct(struct sk_buff *skb,
2884 			  struct flow_dissector_key_ct *key,
2885 			  struct flow_dissector_key_ct *mask)
2886 {
2887 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2888 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2889 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2890 			    sizeof(key->ct_state)))
2891 		goto nla_put_failure;
2892 
2893 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2894 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2895 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2896 			    sizeof(key->ct_zone)))
2897 		goto nla_put_failure;
2898 
2899 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2900 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2901 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2902 			    sizeof(key->ct_mark)))
2903 		goto nla_put_failure;
2904 
2905 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2906 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2907 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2908 			    sizeof(key->ct_labels)))
2909 		goto nla_put_failure;
2910 
2911 	return 0;
2912 
2913 nla_put_failure:
2914 	return -EMSGSIZE;
2915 }
2916 
2917 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2918 			       struct flow_dissector_key_enc_opts *enc_opts)
2919 {
2920 	struct nlattr *nest;
2921 	int err;
2922 
2923 	if (!enc_opts->len)
2924 		return 0;
2925 
2926 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2927 	if (!nest)
2928 		goto nla_put_failure;
2929 
2930 	switch (enc_opts->dst_opt_type) {
2931 	case TUNNEL_GENEVE_OPT:
2932 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2933 		if (err)
2934 			goto nla_put_failure;
2935 		break;
2936 	case TUNNEL_VXLAN_OPT:
2937 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2938 		if (err)
2939 			goto nla_put_failure;
2940 		break;
2941 	case TUNNEL_ERSPAN_OPT:
2942 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2943 		if (err)
2944 			goto nla_put_failure;
2945 		break;
2946 	case TUNNEL_GTP_OPT:
2947 		err = fl_dump_key_gtp_opt(skb, enc_opts);
2948 		if (err)
2949 			goto nla_put_failure;
2950 		break;
2951 	default:
2952 		goto nla_put_failure;
2953 	}
2954 	nla_nest_end(skb, nest);
2955 	return 0;
2956 
2957 nla_put_failure:
2958 	nla_nest_cancel(skb, nest);
2959 	return -EMSGSIZE;
2960 }
2961 
2962 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2963 			       struct flow_dissector_key_enc_opts *key_opts,
2964 			       struct flow_dissector_key_enc_opts *msk_opts)
2965 {
2966 	int err;
2967 
2968 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2969 	if (err)
2970 		return err;
2971 
2972 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2973 }
2974 
2975 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2976 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2977 {
2978 	if (mask->meta.ingress_ifindex) {
2979 		struct net_device *dev;
2980 
2981 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2982 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2983 			goto nla_put_failure;
2984 	}
2985 
2986 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2987 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2988 			    sizeof(key->eth.dst)) ||
2989 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2990 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2991 			    sizeof(key->eth.src)) ||
2992 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2993 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2994 			    sizeof(key->basic.n_proto)))
2995 		goto nla_put_failure;
2996 
2997 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2998 		goto nla_put_failure;
2999 
3000 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3001 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3002 		goto nla_put_failure;
3003 
3004 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3005 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3006 			     &key->cvlan, &mask->cvlan) ||
3007 	    (mask->cvlan.vlan_tpid &&
3008 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3009 			  key->cvlan.vlan_tpid)))
3010 		goto nla_put_failure;
3011 
3012 	if (mask->basic.n_proto) {
3013 		if (mask->cvlan.vlan_eth_type) {
3014 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3015 					 key->basic.n_proto))
3016 				goto nla_put_failure;
3017 		} else if (mask->vlan.vlan_eth_type) {
3018 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3019 					 key->vlan.vlan_eth_type))
3020 				goto nla_put_failure;
3021 		}
3022 	}
3023 
3024 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3025 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3026 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3027 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3028 			    sizeof(key->basic.ip_proto)) ||
3029 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3030 		goto nla_put_failure;
3031 
3032 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3033 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3034 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3035 			     sizeof(key->ipv4.src)) ||
3036 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3037 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3038 			     sizeof(key->ipv4.dst))))
3039 		goto nla_put_failure;
3040 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3041 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3042 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3043 				  sizeof(key->ipv6.src)) ||
3044 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3045 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3046 				  sizeof(key->ipv6.dst))))
3047 		goto nla_put_failure;
3048 
3049 	if (key->basic.ip_proto == IPPROTO_TCP &&
3050 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3051 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3052 			     sizeof(key->tp.src)) ||
3053 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3054 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3055 			     sizeof(key->tp.dst)) ||
3056 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3057 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3058 			     sizeof(key->tcp.flags))))
3059 		goto nla_put_failure;
3060 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3061 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3062 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3063 				  sizeof(key->tp.src)) ||
3064 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3065 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3066 				  sizeof(key->tp.dst))))
3067 		goto nla_put_failure;
3068 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3069 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3070 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3071 				  sizeof(key->tp.src)) ||
3072 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3073 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3074 				  sizeof(key->tp.dst))))
3075 		goto nla_put_failure;
3076 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3077 		 key->basic.ip_proto == IPPROTO_ICMP &&
3078 		 (fl_dump_key_val(skb, &key->icmp.type,
3079 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3080 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3081 				  sizeof(key->icmp.type)) ||
3082 		  fl_dump_key_val(skb, &key->icmp.code,
3083 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3084 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3085 				  sizeof(key->icmp.code))))
3086 		goto nla_put_failure;
3087 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3088 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3089 		 (fl_dump_key_val(skb, &key->icmp.type,
3090 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3091 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3092 				  sizeof(key->icmp.type)) ||
3093 		  fl_dump_key_val(skb, &key->icmp.code,
3094 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3095 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3096 				  sizeof(key->icmp.code))))
3097 		goto nla_put_failure;
3098 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3099 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3100 		 (fl_dump_key_val(skb, &key->arp.sip,
3101 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3102 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3103 				  sizeof(key->arp.sip)) ||
3104 		  fl_dump_key_val(skb, &key->arp.tip,
3105 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3106 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3107 				  sizeof(key->arp.tip)) ||
3108 		  fl_dump_key_val(skb, &key->arp.op,
3109 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3110 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3111 				  sizeof(key->arp.op)) ||
3112 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3113 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3114 				  sizeof(key->arp.sha)) ||
3115 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3116 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3117 				  sizeof(key->arp.tha))))
3118 		goto nla_put_failure;
3119 
3120 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3121 	     key->basic.ip_proto == IPPROTO_UDP ||
3122 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3123 	     fl_dump_key_port_range(skb, key, mask))
3124 		goto nla_put_failure;
3125 
3126 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3127 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3128 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3129 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3130 			    sizeof(key->enc_ipv4.src)) ||
3131 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3132 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3133 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3134 			     sizeof(key->enc_ipv4.dst))))
3135 		goto nla_put_failure;
3136 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3137 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3138 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3139 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3140 			    sizeof(key->enc_ipv6.src)) ||
3141 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3142 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3143 				 &mask->enc_ipv6.dst,
3144 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3145 			    sizeof(key->enc_ipv6.dst))))
3146 		goto nla_put_failure;
3147 
3148 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3149 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3150 			    sizeof(key->enc_key_id)) ||
3151 	    fl_dump_key_val(skb, &key->enc_tp.src,
3152 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3153 			    &mask->enc_tp.src,
3154 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3155 			    sizeof(key->enc_tp.src)) ||
3156 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3157 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3158 			    &mask->enc_tp.dst,
3159 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3160 			    sizeof(key->enc_tp.dst)) ||
3161 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3162 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3163 		goto nla_put_failure;
3164 
3165 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3166 		goto nla_put_failure;
3167 
3168 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3169 		goto nla_put_failure;
3170 
3171 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3172 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3173 			     sizeof(key->hash.hash)))
3174 		goto nla_put_failure;
3175 
3176 	return 0;
3177 
3178 nla_put_failure:
3179 	return -EMSGSIZE;
3180 }
3181 
3182 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3183 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3184 {
3185 	struct cls_fl_filter *f = fh;
3186 	struct nlattr *nest;
3187 	struct fl_flow_key *key, *mask;
3188 	bool skip_hw;
3189 
3190 	if (!f)
3191 		return skb->len;
3192 
3193 	t->tcm_handle = f->handle;
3194 
3195 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3196 	if (!nest)
3197 		goto nla_put_failure;
3198 
3199 	spin_lock(&tp->lock);
3200 
3201 	if (f->res.classid &&
3202 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3203 		goto nla_put_failure_locked;
3204 
3205 	key = &f->key;
3206 	mask = &f->mask->key;
3207 	skip_hw = tc_skip_hw(f->flags);
3208 
3209 	if (fl_dump_key(skb, net, key, mask))
3210 		goto nla_put_failure_locked;
3211 
3212 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3213 		goto nla_put_failure_locked;
3214 
3215 	spin_unlock(&tp->lock);
3216 
3217 	if (!skip_hw)
3218 		fl_hw_update_stats(tp, f, rtnl_held);
3219 
3220 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3221 		goto nla_put_failure;
3222 
3223 	if (tcf_exts_dump(skb, &f->exts))
3224 		goto nla_put_failure;
3225 
3226 	nla_nest_end(skb, nest);
3227 
3228 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3229 		goto nla_put_failure;
3230 
3231 	return skb->len;
3232 
3233 nla_put_failure_locked:
3234 	spin_unlock(&tp->lock);
3235 nla_put_failure:
3236 	nla_nest_cancel(skb, nest);
3237 	return -1;
3238 }
3239 
3240 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3241 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3242 {
3243 	struct cls_fl_filter *f = fh;
3244 	struct nlattr *nest;
3245 	bool skip_hw;
3246 
3247 	if (!f)
3248 		return skb->len;
3249 
3250 	t->tcm_handle = f->handle;
3251 
3252 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3253 	if (!nest)
3254 		goto nla_put_failure;
3255 
3256 	spin_lock(&tp->lock);
3257 
3258 	skip_hw = tc_skip_hw(f->flags);
3259 
3260 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3261 		goto nla_put_failure_locked;
3262 
3263 	spin_unlock(&tp->lock);
3264 
3265 	if (!skip_hw)
3266 		fl_hw_update_stats(tp, f, rtnl_held);
3267 
3268 	if (tcf_exts_terse_dump(skb, &f->exts))
3269 		goto nla_put_failure;
3270 
3271 	nla_nest_end(skb, nest);
3272 
3273 	return skb->len;
3274 
3275 nla_put_failure_locked:
3276 	spin_unlock(&tp->lock);
3277 nla_put_failure:
3278 	nla_nest_cancel(skb, nest);
3279 	return -1;
3280 }
3281 
3282 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3283 {
3284 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3285 	struct fl_flow_key *key, *mask;
3286 	struct nlattr *nest;
3287 
3288 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3289 	if (!nest)
3290 		goto nla_put_failure;
3291 
3292 	key = &tmplt->dummy_key;
3293 	mask = &tmplt->mask;
3294 
3295 	if (fl_dump_key(skb, net, key, mask))
3296 		goto nla_put_failure;
3297 
3298 	nla_nest_end(skb, nest);
3299 
3300 	return skb->len;
3301 
3302 nla_put_failure:
3303 	nla_nest_cancel(skb, nest);
3304 	return -EMSGSIZE;
3305 }
3306 
3307 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3308 			  unsigned long base)
3309 {
3310 	struct cls_fl_filter *f = fh;
3311 
3312 	if (f && f->res.classid == classid) {
3313 		if (cl)
3314 			__tcf_bind_filter(q, &f->res, base);
3315 		else
3316 			__tcf_unbind_filter(q, &f->res);
3317 	}
3318 }
3319 
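/* Mark tp as deleting (under tp->lock, so a concurrent fl_change() observes
 * it and backs off with -EAGAIN) iff the proto no longer holds any filters.
 */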
3320 static bool fl_delete_empty(struct tcf_proto *tp)
3321 {
3322 	struct cls_fl_head *head = fl_head_dereference(tp);
3323 
3324 	spin_lock(&tp->lock);
3325 	tp->deleting = idr_is_empty(&head->handle_idr);
3326 	spin_unlock(&tp->lock);
3327 
3328 	return tp->deleting;
3329 }
3330 
3331 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3332 	.kind		= "flower",
3333 	.classify	= fl_classify,
3334 	.init		= fl_init,
3335 	.destroy	= fl_destroy,
3336 	.get		= fl_get,
3337 	.put		= fl_put,
3338 	.change		= fl_change,
3339 	.delete		= fl_delete,
3340 	.delete_empty	= fl_delete_empty,
3341 	.walk		= fl_walk,
3342 	.reoffload	= fl_reoffload,
3343 	.hw_add		= fl_hw_add,
3344 	.hw_del		= fl_hw_del,
3345 	.dump		= fl_dump,
3346 	.terse_dump	= fl_terse_dump,
3347 	.bind_class	= fl_bind_class,
3348 	.tmplt_create	= fl_tmplt_create,
3349 	.tmplt_destroy	= fl_tmplt_destroy,
3350 	.tmplt_dump	= fl_tmplt_dump,
3351 	.owner		= THIS_MODULE,
3352 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3353 };
3354 
3355 static int __init cls_fl_init(void)
3356 {
3357 	return register_tcf_proto_ops(&cls_fl_ops);
3358 }
3359 
3360 static void __exit cls_fl_exit(void)
3361 {
3362 	unregister_tcf_proto_ops(&cls_fl_ops);
3363 }
3364 
3365 module_init(cls_fl_init);
3366 module_exit(cls_fl_exit);
3367 
3368 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3369 MODULE_DESCRIPTION("Flower classifier");
3370 MODULE_LICENSE("GPL v2");
3371