1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27 
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30 
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 
33 struct fl_flow_key {
34 	struct flow_dissector_key_meta meta;
35 	struct flow_dissector_key_control control;
36 	struct flow_dissector_key_control enc_control;
37 	struct flow_dissector_key_basic basic;
38 	struct flow_dissector_key_eth_addrs eth;
39 	struct flow_dissector_key_vlan vlan;
40 	struct flow_dissector_key_vlan cvlan;
41 	union {
42 		struct flow_dissector_key_ipv4_addrs ipv4;
43 		struct flow_dissector_key_ipv6_addrs ipv6;
44 	};
45 	struct flow_dissector_key_ports tp;
46 	struct flow_dissector_key_icmp icmp;
47 	struct flow_dissector_key_arp arp;
48 	struct flow_dissector_key_keyid enc_key_id;
49 	union {
50 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 	};
53 	struct flow_dissector_key_ports enc_tp;
54 	struct flow_dissector_key_mpls mpls;
55 	struct flow_dissector_key_tcp tcp;
56 	struct flow_dissector_key_ip ip;
57 	struct flow_dissector_key_ip enc_ip;
58 	struct flow_dissector_key_enc_opts enc_opts;
59 	union {
60 		struct flow_dissector_key_ports tp;
61 		struct {
62 			struct flow_dissector_key_ports tp_min;
63 			struct flow_dissector_key_ports tp_max;
64 		};
65 	} tp_range;
66 	struct flow_dissector_key_ct ct;
67 	struct flow_dissector_key_hash hash;
68 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
69 
70 struct fl_flow_mask_range {
71 	unsigned short int start;
72 	unsigned short int end;
73 };
74 
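/*
 * A mask's [start, end) byte range brackets the non-zero portion of its
 * key, rounded out to long-word boundaries, so hashing and comparisons
 * touch only the bytes the mask can actually select on.  Illustrative
 * example (offsets are hypothetical): if only eth.dst is masked and it
 * occupies bytes 20..25 of struct fl_flow_key, then on a 64-bit machine
 * the range becomes start = 16, end = 32.
 */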
75 struct fl_flow_mask {
76 	struct fl_flow_key key;
77 	struct fl_flow_mask_range range;
78 	u32 flags;
79 	struct rhash_head ht_node;
80 	struct rhashtable ht;
81 	struct rhashtable_params filter_ht_params;
82 	struct flow_dissector dissector;
83 	struct list_head filters;
84 	struct rcu_work rwork;
85 	struct list_head list;
86 	refcount_t refcnt;
87 };
88 
89 struct fl_flow_tmplt {
90 	struct fl_flow_key dummy_key;
91 	struct fl_flow_key mask;
92 	struct flow_dissector dissector;
93 	struct tcf_chain *chain;
94 };
95 
96 struct cls_fl_head {
97 	struct rhashtable ht;
98 	spinlock_t masks_lock; /* Protect masks list */
99 	struct list_head masks;
100 	struct list_head hw_filters;
101 	struct rcu_work rwork;
102 	struct idr handle_idr;
103 };
104 
105 struct cls_fl_filter {
106 	struct fl_flow_mask *mask;
107 	struct rhash_head ht_node;
108 	struct fl_flow_key mkey;
109 	struct tcf_exts exts;
110 	struct tcf_result res;
111 	struct fl_flow_key key;
112 	struct list_head list;
113 	struct list_head hw_list;
114 	u32 handle;
115 	u32 flags;
116 	u32 in_hw_count;
117 	struct rcu_work rwork;
118 	struct net_device *hw_dev;
119 	/* The flower classifier is unlocked, which means that its reference
120 	 * counter can be changed concurrently without any kind of external
121 	 * synchronization. Use an atomic reference counter to be concurrency-safe.
122 	 */
123 	refcount_t refcnt;
124 	bool deleted;
125 };
126 
127 static const struct rhashtable_params mask_ht_params = {
128 	.key_offset = offsetof(struct fl_flow_mask, key),
129 	.key_len = sizeof(struct fl_flow_key),
130 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
131 	.automatic_shrinking = true,
132 };
133 
134 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
135 {
136 	return mask->range.end - mask->range.start;
137 }
138 
139 static void fl_mask_update_range(struct fl_flow_mask *mask)
140 {
141 	const u8 *bytes = (const u8 *) &mask->key;
142 	size_t size = sizeof(mask->key);
143 	size_t i, first = 0, last;
144 
145 	for (i = 0; i < size; i++) {
146 		if (bytes[i]) {
147 			first = i;
148 			break;
149 		}
150 	}
151 	last = first;
152 	for (i = size - 1; i != first; i--) {
153 		if (bytes[i]) {
154 			last = i;
155 			break;
156 		}
157 	}
158 	mask->range.start = rounddown(first, sizeof(long));
159 	mask->range.end = roundup(last + 1, sizeof(long));
160 }
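/*
 * Worked example for fl_mask_update_range() above (byte offsets are
 * illustrative): with the first non-zero mask byte at offset 21 and the
 * last at 24, and sizeof(long) == 8, start = rounddown(21, 8) = 16 and
 * end = roundup(25, 8) = 32.  An all-zero mask degenerates to
 * first == last == 0, i.e. the range [0, sizeof(long)).
 */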
161 
162 static void *fl_key_get_start(struct fl_flow_key *key,
163 			      const struct fl_flow_mask *mask)
164 {
165 	return (u8 *) key + mask->range.start;
166 }
167 
168 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
169 			      struct fl_flow_mask *mask)
170 {
171 	const long *lkey = fl_key_get_start(key, mask);
172 	const long *lmask = fl_key_get_start(&mask->key, mask);
173 	long *lmkey = fl_key_get_start(mkey, mask);
174 	int i;
175 
176 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
177 		*lmkey++ = *lkey++ & *lmask++;
178 }
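/*
 * Sketch of what fl_set_masked_key() computes, one long word at a time
 * (values are illustrative):
 *
 *	key:  0xc0a80001aabbccdd
 *	mask: 0xffffffff00000000
 *	mkey: 0xc0a8000100000000
 *
 * Bytes outside [range.start, range.end) are never hashed or compared;
 * see fl_init_mask_hashtable(), which keys the hashtable on exactly this
 * range.
 */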
179 
180 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
181 			       struct fl_flow_mask *mask)
182 {
183 	const long *lmask = fl_key_get_start(&mask->key, mask);
184 	const long *ltmplt;
185 	int i;
186 
187 	if (!tmplt)
188 		return true;
189 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
190 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
191 		if (~*ltmplt++ & *lmask++)
192 			return false;
193 	}
194 	return true;
195 }
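/*
 * In words, fl_mask_fits_tmplt() above accepts a mask iff every bit set
 * in the mask is also set in the template's mask: per long word,
 * ~*ltmplt & *lmask is non-zero exactly when the mask requests a bit the
 * template did not declare.  Single-byte example (illustrative):
 * tmplt = 0xf0 fits mask = 0xa0 but rejects mask = 0x08.
 */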
196 
197 static void fl_clear_masked_range(struct fl_flow_key *key,
198 				  struct fl_flow_mask *mask)
199 {
200 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
201 }
202 
203 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
204 				  struct fl_flow_key *key,
205 				  struct fl_flow_key *mkey)
206 {
207 	__be16 min_mask, max_mask, min_val, max_val;
208 
209 	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
210 	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
211 	min_val = htons(filter->key.tp_range.tp_min.dst);
212 	max_val = htons(filter->key.tp_range.tp_max.dst);
213 
214 	if (min_mask && max_mask) {
215 		if (htons(key->tp_range.tp.dst) < min_val ||
216 		    htons(key->tp_range.tp.dst) > max_val)
217 			return false;
218 
219 		/* skb does not have min and max values */
220 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
221 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
222 	}
223 	return true;
224 }
225 
226 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
227 				  struct fl_flow_key *key,
228 				  struct fl_flow_key *mkey)
229 {
230 	__be16 min_mask, max_mask, min_val, max_val;
231 
232 	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
233 	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
234 	min_val = htons(filter->key.tp_range.tp_min.src);
235 	max_val = htons(filter->key.tp_range.tp_max.src);
236 
237 	if (min_mask && max_mask) {
238 		if (htons(key->tp_range.tp.src) < min_val ||
239 		    htons(key->tp_range.tp.src) > max_val)
240 			return false;
241 
242 		/* skb does not have min and max values */
243 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
244 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
245 	}
246 	return true;
247 }
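/*
 * Why the two range helpers above write into the lookup key: the
 * rhashtable is keyed on masked keys, but an skb carries a single port
 * value rather than a min/max pair.  Once the packet's port is confirmed
 * to lie inside [min, max] (compared in host byte order), the filter's
 * own masked min/max values are copied into mkey so that the following
 * __fl_lookup() hash lookup can still hit the filter's entry.
 */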
248 
249 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
250 					 struct fl_flow_key *mkey)
251 {
252 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
253 				      mask->filter_ht_params);
254 }
255 
256 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
257 					     struct fl_flow_key *mkey,
258 					     struct fl_flow_key *key)
259 {
260 	struct cls_fl_filter *filter, *f;
261 
262 	list_for_each_entry_rcu(filter, &mask->filters, list) {
263 		if (!fl_range_port_dst_cmp(filter, key, mkey))
264 			continue;
265 
266 		if (!fl_range_port_src_cmp(filter, key, mkey))
267 			continue;
268 
269 		f = __fl_lookup(mask, mkey);
270 		if (f)
271 			return f;
272 	}
273 	return NULL;
274 }
275 
276 static noinline_for_stack
277 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
278 {
279 	struct fl_flow_key mkey;
280 
281 	fl_set_masked_key(&mkey, key, mask);
282 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
283 		return fl_lookup_range(mask, &mkey, key);
284 
285 	return __fl_lookup(mask, &mkey);
286 }
287 
288 static u16 fl_ct_info_to_flower_map[] = {
289 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
290 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
291 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
292 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
293 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
294 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
295 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
296 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
297 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
298 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
299 };
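/*
 * The table above maps the kernel's conntrack info values (array index)
 * to the TCA_FLOWER_KEY_CT_FLAGS_* bits flower matches on.  Every mapped
 * state implies TCA_FLOWER_KEY_CT_FLAGS_TRACKED, and reply-direction
 * states translate to the same ESTABLISHED/RELATED flags as their
 * original-direction counterparts.
 */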
300 
301 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
302 		       struct tcf_result *res)
303 {
304 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
305 	struct fl_flow_key skb_key;
306 	struct fl_flow_mask *mask;
307 	struct cls_fl_filter *f;
308 
309 	list_for_each_entry_rcu(mask, &head->masks, list) {
310 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
311 		fl_clear_masked_range(&skb_key, mask);
312 
313 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
314 		/* skb_flow_dissect() does not set n_proto in case of an unknown
315 		 * protocol, so do it here instead.
316 		 */
317 		skb_key.basic.n_proto = skb_protocol(skb, false);
318 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
319 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
320 				    fl_ct_info_to_flower_map,
321 				    ARRAY_SIZE(fl_ct_info_to_flower_map));
322 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
323 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
324 
325 		f = fl_mask_lookup(mask, &skb_key);
326 		if (f && !tc_skip_sw(f->flags)) {
327 			*res = f->res;
328 			return tcf_exts_exec(skb, &f->exts, res);
329 		}
330 	}
331 	return -1;
332 }
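/*
 * Note on the fast path above: classification costs one dissection pass
 * and one hash lookup per distinct mask, so rule sets that share masks
 * scale far better than rule sets where every filter brings its own
 * unique mask.
 */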
333 
334 static int fl_init(struct tcf_proto *tp)
335 {
336 	struct cls_fl_head *head;
337 
338 	head = kzalloc(sizeof(*head), GFP_KERNEL);
339 	if (!head)
340 		return -ENOBUFS;
341 
342 	spin_lock_init(&head->masks_lock);
343 	INIT_LIST_HEAD_RCU(&head->masks);
344 	INIT_LIST_HEAD(&head->hw_filters);
345 	rcu_assign_pointer(tp->root, head);
346 	idr_init(&head->handle_idr);
347 
348 	return rhashtable_init(&head->ht, &mask_ht_params);
349 }
350 
351 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
352 {
353 	/* temporary masks don't have their filters list and ht initialized */
354 	if (mask_init_done) {
355 		WARN_ON(!list_empty(&mask->filters));
356 		rhashtable_destroy(&mask->ht);
357 	}
358 	kfree(mask);
359 }
360 
361 static void fl_mask_free_work(struct work_struct *work)
362 {
363 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
364 						 struct fl_flow_mask, rwork);
365 
366 	fl_mask_free(mask, true);
367 }
368 
369 static void fl_uninit_mask_free_work(struct work_struct *work)
370 {
371 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
372 						 struct fl_flow_mask, rwork);
373 
374 	fl_mask_free(mask, false);
375 }
376 
377 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
378 {
379 	if (!refcount_dec_and_test(&mask->refcnt))
380 		return false;
381 
382 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
383 
384 	spin_lock(&head->masks_lock);
385 	list_del_rcu(&mask->list);
386 	spin_unlock(&head->masks_lock);
387 
388 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
389 
390 	return true;
391 }
392 
393 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
394 {
395 	/* The flower classifier only changes its root pointer during init and
396 	 * destroy. Users must obtain a reference to the tcf_proto instance
397 	 * before calling its API, so the tp->root pointer is protected from a
398 	 * concurrent call to fl_destroy() by reference counting.
399 	 */
400 	return rcu_dereference_raw(tp->root);
401 }
402 
403 static void __fl_destroy_filter(struct cls_fl_filter *f)
404 {
405 	tcf_exts_destroy(&f->exts);
406 	tcf_exts_put_net(&f->exts);
407 	kfree(f);
408 }
409 
410 static void fl_destroy_filter_work(struct work_struct *work)
411 {
412 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
413 					struct cls_fl_filter, rwork);
414 
415 	__fl_destroy_filter(f);
416 }
417 
418 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
419 				 bool rtnl_held, struct netlink_ext_ack *extack)
420 {
421 	struct tcf_block *block = tp->chain->block;
422 	struct flow_cls_offload cls_flower = {};
423 
424 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
425 	cls_flower.command = FLOW_CLS_DESTROY;
426 	cls_flower.cookie = (unsigned long) f;
427 
428 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
429 			    &f->flags, &f->in_hw_count, rtnl_held);
430 
431 }
432 
433 static int fl_hw_replace_filter(struct tcf_proto *tp,
434 				struct cls_fl_filter *f, bool rtnl_held,
435 				struct netlink_ext_ack *extack)
436 {
437 	struct tcf_block *block = tp->chain->block;
438 	struct flow_cls_offload cls_flower = {};
439 	bool skip_sw = tc_skip_sw(f->flags);
440 	int err = 0;
441 
442 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
443 	if (!cls_flower.rule)
444 		return -ENOMEM;
445 
446 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
447 	cls_flower.command = FLOW_CLS_REPLACE;
448 	cls_flower.cookie = (unsigned long) f;
449 	cls_flower.rule->match.dissector = &f->mask->dissector;
450 	cls_flower.rule->match.mask = &f->mask->key;
451 	cls_flower.rule->match.key = &f->mkey;
452 	cls_flower.classid = f->res.classid;
453 
454 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
455 	if (err) {
456 		kfree(cls_flower.rule);
457 		if (skip_sw) {
458 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
459 			return err;
460 		}
461 		return 0;
462 	}
463 
464 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
465 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
466 	tc_cleanup_flow_action(&cls_flower.rule->action);
467 	kfree(cls_flower.rule);
468 
469 	if (err) {
470 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
471 		return err;
472 	}
473 
474 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
475 		return -EINVAL;
476 
477 	return 0;
478 }
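/*
 * User-visible effect of fl_hw_replace_filter() above, shown with an
 * illustrative iproute2 invocation (see tc-flower(8) for the flags):
 *
 *	tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *		dst_ip 192.0.2.1 action drop
 *
 * With skip_sw, failure to install the rule in hardware (or the rule not
 * being marked in-hw afterwards) fails the whole replace; without it,
 * hardware errors are tolerated and the software path stays
 * authoritative.
 */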
479 
480 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
481 			       bool rtnl_held)
482 {
483 	struct tcf_block *block = tp->chain->block;
484 	struct flow_cls_offload cls_flower = {};
485 
486 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
487 	cls_flower.command = FLOW_CLS_STATS;
488 	cls_flower.cookie = (unsigned long) f;
489 	cls_flower.classid = f->res.classid;
490 
491 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
492 			 rtnl_held);
493 
494 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
495 			      cls_flower.stats.pkts,
496 			      cls_flower.stats.drops,
497 			      cls_flower.stats.lastused,
498 			      cls_flower.stats.used_hw_stats,
499 			      cls_flower.stats.used_hw_stats_valid);
500 }
501 
502 static void __fl_put(struct cls_fl_filter *f)
503 {
504 	if (!refcount_dec_and_test(&f->refcnt))
505 		return;
506 
507 	if (tcf_exts_get_net(&f->exts))
508 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
509 	else
510 		__fl_destroy_filter(f);
511 }
512 
513 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
514 {
515 	struct cls_fl_filter *f;
516 
517 	rcu_read_lock();
518 	f = idr_find(&head->handle_idr, handle);
519 	if (f && !refcount_inc_not_zero(&f->refcnt))
520 		f = NULL;
521 	rcu_read_unlock();
522 
523 	return f;
524 }
525 
526 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
527 		       bool *last, bool rtnl_held,
528 		       struct netlink_ext_ack *extack)
529 {
530 	struct cls_fl_head *head = fl_head_dereference(tp);
531 
532 	*last = false;
533 
534 	spin_lock(&tp->lock);
535 	if (f->deleted) {
536 		spin_unlock(&tp->lock);
537 		return -ENOENT;
538 	}
539 
540 	f->deleted = true;
541 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
542 			       f->mask->filter_ht_params);
543 	idr_remove(&head->handle_idr, f->handle);
544 	list_del_rcu(&f->list);
545 	spin_unlock(&tp->lock);
546 
547 	*last = fl_mask_put(head, f->mask);
548 	if (!tc_skip_hw(f->flags))
549 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
550 	tcf_unbind_filter(tp, &f->res);
551 	__fl_put(f);
552 
553 	return 0;
554 }
555 
556 static void fl_destroy_sleepable(struct work_struct *work)
557 {
558 	struct cls_fl_head *head = container_of(to_rcu_work(work),
559 						struct cls_fl_head,
560 						rwork);
561 
562 	rhashtable_destroy(&head->ht);
563 	kfree(head);
564 	module_put(THIS_MODULE);
565 }
566 
567 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
568 		       struct netlink_ext_ack *extack)
569 {
570 	struct cls_fl_head *head = fl_head_dereference(tp);
571 	struct fl_flow_mask *mask, *next_mask;
572 	struct cls_fl_filter *f, *next;
573 	bool last;
574 
575 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
576 		list_for_each_entry_safe(f, next, &mask->filters, list) {
577 			__fl_delete(tp, f, &last, rtnl_held, extack);
578 			if (last)
579 				break;
580 		}
581 	}
582 	idr_destroy(&head->handle_idr);
583 
584 	__module_get(THIS_MODULE);
585 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
586 }
587 
588 static void fl_put(struct tcf_proto *tp, void *arg)
589 {
590 	struct cls_fl_filter *f = arg;
591 
592 	__fl_put(f);
593 }
594 
595 static void *fl_get(struct tcf_proto *tp, u32 handle)
596 {
597 	struct cls_fl_head *head = fl_head_dereference(tp);
598 
599 	return __fl_get(head, handle);
600 }
601 
602 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
603 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
604 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
605 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
606 					    .len = IFNAMSIZ },
607 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
608 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
609 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
610 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
611 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
612 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
613 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
614 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
615 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
616 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
617 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
618 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
619 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
620 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
621 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
622 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
623 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
624 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
625 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
626 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
627 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
628 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
629 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
630 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
631 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
632 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
633 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
634 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
635 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
636 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
637 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
641 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
642 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
644 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
645 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
646 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
647 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
648 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
650 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
651 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
652 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
653 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
654 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
655 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
656 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
657 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
658 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
659 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
660 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
661 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
662 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
663 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
666 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
667 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
668 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
669 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
671 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
672 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
673 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
674 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
675 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
676 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
679 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
681 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
683 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
684 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
685 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
686 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
687 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
688 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
689 	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
690 	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
691 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
692 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
693 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
694 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
695 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
696 					    .len = 128 / BITS_PER_BYTE },
697 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
698 					    .len = 128 / BITS_PER_BYTE },
699 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
700 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
701 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
702 
703 };
704 
705 static const struct nla_policy
706 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
707 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
708 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
709 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
710 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
711 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
712 };
713 
714 static const struct nla_policy
715 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
716 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
717 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
718 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
719 						       .len = 128 },
720 };
721 
722 static const struct nla_policy
723 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
724 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
725 };
726 
727 static const struct nla_policy
728 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
729 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
730 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
731 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
732 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
733 };
734 
735 static const struct nla_policy
736 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
737 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
738 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
739 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
740 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
741 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
742 };
743 
744 static void fl_set_key_val(struct nlattr **tb,
745 			   void *val, int val_type,
746 			   void *mask, int mask_type, int len)
747 {
748 	if (!tb[val_type])
749 		return;
750 	nla_memcpy(val, tb[val_type], len);
751 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
752 		memset(mask, 0xff, len);
753 	else
754 		nla_memcpy(mask, tb[mask_type], len);
755 }
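/*
 * Semantics of fl_set_key_val() above: an absent value attribute leaves
 * key and mask untouched (wildcard); a value without a mask attribute
 * (or with mask_type == TCA_FLOWER_UNSPEC) means exact match, hence the
 * all-ones fill.  For example, TCA_FLOWER_KEY_IP_PROTO has no mask
 * attribute, so "ip_proto tcp" always matches the full 8-bit field.
 */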
756 
757 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
758 				 struct fl_flow_key *mask,
759 				 struct netlink_ext_ack *extack)
760 {
761 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
762 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
763 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
764 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
765 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
766 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
767 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
768 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
769 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
770 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
771 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
772 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
773 
774 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
775 	    htons(key->tp_range.tp_max.dst) <=
776 	    htons(key->tp_range.tp_min.dst)) {
777 		NL_SET_ERR_MSG_ATTR(extack,
778 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
779 				    "Invalid destination port range (min must be strictly smaller than max)");
780 		return -EINVAL;
781 	}
782 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
783 	    htons(key->tp_range.tp_max.src) <=
784 	    htons(key->tp_range.tp_min.src)) {
785 		NL_SET_ERR_MSG_ATTR(extack,
786 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
787 				    "Invalid source port range (min must be strictly smaller than max)");
788 		return -EINVAL;
789 	}
790 
791 	return 0;
792 }
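/*
 * The range checks above require min < max in host byte order.
 * Illustrative iproute2 usage that populates the MIN/MAX attributes:
 *
 *	tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
 *		dst_port 1000-1999 action drop
 */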
793 
794 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
795 			       struct flow_dissector_key_mpls *key_val,
796 			       struct flow_dissector_key_mpls *key_mask,
797 			       struct netlink_ext_ack *extack)
798 {
799 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
800 	struct flow_dissector_mpls_lse *lse_mask;
801 	struct flow_dissector_mpls_lse *lse_val;
802 	u8 lse_index;
803 	u8 depth;
804 	int err;
805 
806 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
807 			       mpls_stack_entry_policy, extack);
808 	if (err < 0)
809 		return err;
810 
811 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
812 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
813 		return -EINVAL;
814 	}
815 
816 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
817 
818 	/* LSE depth starts at 1, for consistency with terminology used by
819 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
820 	 */
821 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
822 		NL_SET_ERR_MSG_ATTR(extack,
823 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
824 				    "Invalid MPLS depth");
825 		return -EINVAL;
826 	}
827 	lse_index = depth - 1;
828 
829 	dissector_set_mpls_lse(key_val, lse_index);
830 	dissector_set_mpls_lse(key_mask, lse_index);
831 
832 	lse_val = &key_val->ls[lse_index];
833 	lse_mask = &key_mask->ls[lse_index];
834 
835 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
836 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
837 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
838 	}
839 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
840 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
841 
842 		if (bos & ~MPLS_BOS_MASK) {
843 			NL_SET_ERR_MSG_ATTR(extack,
844 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
845 					    "Bottom Of Stack (BOS) must be 0 or 1");
846 			return -EINVAL;
847 		}
848 		lse_val->mpls_bos = bos;
849 		lse_mask->mpls_bos = MPLS_BOS_MASK;
850 	}
851 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
852 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
853 
854 		if (tc & ~MPLS_TC_MASK) {
855 			NL_SET_ERR_MSG_ATTR(extack,
856 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
857 					    "Traffic Class (TC) must be between 0 and 7");
858 			return -EINVAL;
859 		}
860 		lse_val->mpls_tc = tc;
861 		lse_mask->mpls_tc = MPLS_TC_MASK;
862 	}
863 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
864 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
865 
866 		if (label & ~MPLS_LABEL_MASK) {
867 			NL_SET_ERR_MSG_ATTR(extack,
868 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
869 					    "Label must be between 0 and 1048575");
870 			return -EINVAL;
871 		}
872 		lse_val->mpls_label = label;
873 		lse_mask->mpls_label = MPLS_LABEL_MASK;
874 	}
875 
876 	return 0;
877 }
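/*
 * Example for the LSE parser above: depth is 1-based, so depth 1 is the
 * outermost label stack entry, stored at ls[0].  A filter matching label
 * 100 at depth 2 ends up with lse_index = 1, lse_val->mpls_label = 100
 * and lse_mask->mpls_label = MPLS_LABEL_MASK.
 */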
878 
879 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
880 				struct flow_dissector_key_mpls *key_val,
881 				struct flow_dissector_key_mpls *key_mask,
882 				struct netlink_ext_ack *extack)
883 {
884 	struct nlattr *nla_lse;
885 	int rem;
886 	int err;
887 
888 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
889 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
890 				    "NLA_F_NESTED is missing");
891 		return -EINVAL;
892 	}
893 
894 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
895 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
896 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
897 					    "Invalid MPLS option type");
898 			return -EINVAL;
899 		}
900 
901 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
902 		if (err < 0)
903 			return err;
904 	}
905 	if (rem) {
906 		NL_SET_ERR_MSG(extack,
907 			       "Bytes leftover after parsing MPLS options");
908 		return -EINVAL;
909 	}
910 
911 	return 0;
912 }
913 
914 static int fl_set_key_mpls(struct nlattr **tb,
915 			   struct flow_dissector_key_mpls *key_val,
916 			   struct flow_dissector_key_mpls *key_mask,
917 			   struct netlink_ext_ack *extack)
918 {
919 	struct flow_dissector_mpls_lse *lse_mask;
920 	struct flow_dissector_mpls_lse *lse_val;
921 
922 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
923 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
924 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
925 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
926 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
927 			NL_SET_ERR_MSG_ATTR(extack,
928 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
929 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
930 			return -EBADMSG;
931 		}
932 
933 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
934 					    key_val, key_mask, extack);
935 	}
936 
937 	lse_val = &key_val->ls[0];
938 	lse_mask = &key_mask->ls[0];
939 
940 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
941 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
942 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
943 		dissector_set_mpls_lse(key_val, 0);
944 		dissector_set_mpls_lse(key_mask, 0);
945 	}
946 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
947 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
948 
949 		if (bos & ~MPLS_BOS_MASK) {
950 			NL_SET_ERR_MSG_ATTR(extack,
951 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
952 					    "Bottom Of Stack (BOS) must be 0 or 1");
953 			return -EINVAL;
954 		}
955 		lse_val->mpls_bos = bos;
956 		lse_mask->mpls_bos = MPLS_BOS_MASK;
957 		dissector_set_mpls_lse(key_val, 0);
958 		dissector_set_mpls_lse(key_mask, 0);
959 	}
960 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
961 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
962 
963 		if (tc & ~MPLS_TC_MASK) {
964 			NL_SET_ERR_MSG_ATTR(extack,
965 					    tb[TCA_FLOWER_KEY_MPLS_TC],
966 					    "Traffic Class (TC) must be between 0 and 7");
967 			return -EINVAL;
968 		}
969 		lse_val->mpls_tc = tc;
970 		lse_mask->mpls_tc = MPLS_TC_MASK;
971 		dissector_set_mpls_lse(key_val, 0);
972 		dissector_set_mpls_lse(key_mask, 0);
973 	}
974 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
975 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
976 
977 		if (label & ~MPLS_LABEL_MASK) {
978 			NL_SET_ERR_MSG_ATTR(extack,
979 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
980 					    "Label must be between 0 and 1048575");
981 			return -EINVAL;
982 		}
983 		lse_val->mpls_label = label;
984 		lse_mask->mpls_label = MPLS_LABEL_MASK;
985 		dissector_set_mpls_lse(key_val, 0);
986 		dissector_set_mpls_lse(key_mask, 0);
987 	}
988 	return 0;
989 }
990 
991 static void fl_set_key_vlan(struct nlattr **tb,
992 			    __be16 ethertype,
993 			    int vlan_id_key, int vlan_prio_key,
994 			    struct flow_dissector_key_vlan *key_val,
995 			    struct flow_dissector_key_vlan *key_mask)
996 {
997 #define VLAN_PRIORITY_MASK	0x7
998 
999 	if (tb[vlan_id_key]) {
1000 		key_val->vlan_id =
1001 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1002 		key_mask->vlan_id = VLAN_VID_MASK;
1003 	}
1004 	if (tb[vlan_prio_key]) {
1005 		key_val->vlan_priority =
1006 			nla_get_u8(tb[vlan_prio_key]) &
1007 			VLAN_PRIORITY_MASK;
1008 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1009 	}
1010 	key_val->vlan_tpid = ethertype;
1011 	key_mask->vlan_tpid = cpu_to_be16(~0);
1012 }
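/*
 * Note on fl_set_key_vlan() above: matching on VLAN id and priority is
 * optional, but once a vlan key is present the TPID (the ethertype that
 * introduced the tag, e.g. 0x8100 or 0x88a8) is always matched exactly.
 */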
1013 
1014 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1015 			    u32 *dissector_key, u32 *dissector_mask,
1016 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1017 {
1018 	if (flower_mask & flower_flag_bit) {
1019 		*dissector_mask |= dissector_flag_bit;
1020 		if (flower_key & flower_flag_bit)
1021 			*dissector_key |= dissector_flag_bit;
1022 	}
1023 }
1024 
1025 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1026 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1027 {
1028 	u32 key, mask;
1029 
1030 	/* mask is mandatory for flags */
1031 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1032 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1033 		return -EINVAL;
1034 	}
1035 
1036 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
1037 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1038 
1039 	*flags_key  = 0;
1040 	*flags_mask = 0;
1041 
1042 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1043 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1044 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1045 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1046 			FLOW_DIS_FIRST_FRAG);
1047 
1048 	return 0;
1049 }
1050 
1051 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1052 			  struct flow_dissector_key_ip *key,
1053 			  struct flow_dissector_key_ip *mask)
1054 {
1055 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1056 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1057 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1058 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1059 
1060 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1061 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1062 }
1063 
1064 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1065 			     int depth, int option_len,
1066 			     struct netlink_ext_ack *extack)
1067 {
1068 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1069 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1070 	struct geneve_opt *opt;
1071 	int err, data_len = 0;
1072 
1073 	if (option_len > sizeof(struct geneve_opt))
1074 		data_len = option_len - sizeof(struct geneve_opt);
1075 
1076 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1077 	memset(opt, 0xff, option_len);
1078 	opt->length = data_len / 4;
1079 	opt->r1 = 0;
1080 	opt->r2 = 0;
1081 	opt->r3 = 0;
1082 
1083 	/* If no mask has been provided, we assume an exact match. */
1084 	if (!depth)
1085 		return sizeof(struct geneve_opt) + data_len;
1086 
1087 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1088 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1089 		return -EINVAL;
1090 	}
1091 
1092 	err = nla_parse_nested_deprecated(tb,
1093 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1094 					  nla, geneve_opt_policy, extack);
1095 	if (err < 0)
1096 		return err;
1097 
1098 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1099 	 * fields from the key.
1100 	 */
1101 	if (!option_len &&
1102 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1103 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1104 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1105 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1106 		return -EINVAL;
1107 	}
1108 
1109 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1110 	 * for the mask.
1111 	 */
1112 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1113 		int new_len = key->enc_opts.len;
1114 
1115 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1116 		data_len = nla_len(data);
1117 		if (data_len < 4) {
1118 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1119 			return -ERANGE;
1120 		}
1121 		if (data_len % 4) {
1122 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1123 			return -ERANGE;
1124 		}
1125 
1126 		new_len += sizeof(struct geneve_opt) + data_len;
1127 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1128 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1129 			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
1130 			return -ERANGE;
1131 		}
1132 		opt->length = data_len / 4;
1133 		memcpy(opt->opt_data, nla_data(data), data_len);
1134 	}
1135 
1136 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1137 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1138 		opt->opt_class = nla_get_be16(class);
1139 	}
1140 
1141 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1142 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1143 		opt->type = nla_get_u8(type);
1144 	}
1145 
1146 	return sizeof(struct geneve_opt) + data_len;
1147 }
1148 
1149 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1150 			    int depth, int option_len,
1151 			    struct netlink_ext_ack *extack)
1152 {
1153 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1154 	struct vxlan_metadata *md;
1155 	int err;
1156 
1157 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1158 	memset(md, 0xff, sizeof(*md));
1159 
1160 	if (!depth)
1161 		return sizeof(*md);
1162 
1163 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1164 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1165 		return -EINVAL;
1166 	}
1167 
1168 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1169 			       vxlan_opt_policy, extack);
1170 	if (err < 0)
1171 		return err;
1172 
1173 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1174 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1175 		return -EINVAL;
1176 	}
1177 
1178 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1179 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1180 		md->gbp &= VXLAN_GBP_MASK;
1181 	}
1182 
1183 	return sizeof(*md);
1184 }
1185 
1186 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1187 			     int depth, int option_len,
1188 			     struct netlink_ext_ack *extack)
1189 {
1190 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1191 	struct erspan_metadata *md;
1192 	int err;
1193 
1194 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1195 	memset(md, 0xff, sizeof(*md));
1196 	md->version = 1;
1197 
1198 	if (!depth)
1199 		return sizeof(*md);
1200 
1201 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1202 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1203 		return -EINVAL;
1204 	}
1205 
1206 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1207 			       erspan_opt_policy, extack);
1208 	if (err < 0)
1209 		return err;
1210 
1211 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1212 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1213 		return -EINVAL;
1214 	}
1215 
1216 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1217 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1218 
1219 	if (md->version == 1) {
1220 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1221 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1222 			return -EINVAL;
1223 		}
1224 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1225 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1226 			memset(&md->u, 0x00, sizeof(md->u));
1227 			md->u.index = nla_get_be32(nla);
1228 		}
1229 	} else if (md->version == 2) {
1230 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1231 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1232 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1233 			return -EINVAL;
1234 		}
1235 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1236 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1237 			md->u.md2.dir = nla_get_u8(nla);
1238 		}
1239 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1240 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1241 			set_hwid(&md->u.md2, nla_get_u8(nla));
1242 		}
1243 	} else {
1244 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1245 		return -EINVAL;
1246 	}
1247 
1248 	return sizeof(*md);
1249 }
1250 
1251 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1252 			  struct fl_flow_key *mask,
1253 			  struct netlink_ext_ack *extack)
1254 {
1255 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1256 	int err, option_len, key_depth, msk_depth = 0;
1257 
1258 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1259 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1260 					     enc_opts_policy, extack);
1261 	if (err)
1262 		return err;
1263 
1264 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1265 
1266 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1267 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1268 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1269 						     enc_opts_policy, extack);
1270 		if (err)
1271 			return err;
1272 
1273 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1274 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1275 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1276 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1277 			return -EINVAL;
1278 		}
1279 	}
1280 
1281 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1282 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1283 		switch (nla_type(nla_opt_key)) {
1284 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1285 			if (key->enc_opts.dst_opt_type &&
1286 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1287 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1288 				return -EINVAL;
1289 			}
1290 			option_len = 0;
1291 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1292 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1293 						       key_depth, option_len,
1294 						       extack);
1295 			if (option_len < 0)
1296 				return option_len;
1297 
1298 			key->enc_opts.len += option_len;
1299 			/* At the same time, we need to parse through the mask
1300 			 * to verify that key and mask attribute lengths match.
1301 			 */
1302 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1303 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1304 						       msk_depth, option_len,
1305 						       extack);
1306 			if (option_len < 0)
1307 				return option_len;
1308 
1309 			mask->enc_opts.len += option_len;
1310 			if (key->enc_opts.len != mask->enc_opts.len) {
1311 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1312 				return -EINVAL;
1313 			}
1314 			break;
1315 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1316 			if (key->enc_opts.dst_opt_type) {
1317 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1318 				return -EINVAL;
1319 			}
1320 			option_len = 0;
1321 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1322 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1323 						      key_depth, option_len,
1324 						      extack);
1325 			if (option_len < 0)
1326 				return option_len;
1327 
1328 			key->enc_opts.len += option_len;
1329 			/* At the same time, we need to parse through the mask
1330 			 * to verify that key and mask attribute lengths match.
1331 			 */
1332 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1333 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1334 						      msk_depth, option_len,
1335 						      extack);
1336 			if (option_len < 0)
1337 				return option_len;
1338 
1339 			mask->enc_opts.len += option_len;
1340 			if (key->enc_opts.len != mask->enc_opts.len) {
1341 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1342 				return -EINVAL;
1343 			}
1344 			break;
1345 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1346 			if (key->enc_opts.dst_opt_type) {
1347 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1348 				return -EINVAL;
1349 			}
1350 			option_len = 0;
1351 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1352 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1353 						       key_depth, option_len,
1354 						       extack);
1355 			if (option_len < 0)
1356 				return option_len;
1357 
1358 			key->enc_opts.len += option_len;
1359 			/* At the same time, we need to parse through the mask
1360 			 * to verify that key and mask attribute lengths match.
1361 			 */
1362 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1363 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1364 						       msk_depth, option_len,
1365 						       extack);
1366 			if (option_len < 0)
1367 				return option_len;
1368 
1369 			mask->enc_opts.len += option_len;
1370 			if (key->enc_opts.len != mask->enc_opts.len) {
1371 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1372 				return -EINVAL;
1373 			}
1374 			break;
1375 		default:
1376 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1377 			return -EINVAL;
1378 		}
1379 
1380 		if (!msk_depth)
1381 			continue;
1382 
1383 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1384 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1385 			return -EINVAL;
1386 		}
1387 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1388 	}
1389 
1390 	return 0;
1391 }
1392 
1393 static int fl_set_key_ct(struct nlattr **tb,
1394 			 struct flow_dissector_key_ct *key,
1395 			 struct flow_dissector_key_ct *mask,
1396 			 struct netlink_ext_ack *extack)
1397 {
1398 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1399 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1400 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1401 			return -EOPNOTSUPP;
1402 		}
1403 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1404 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1405 			       sizeof(key->ct_state));
1406 	}
1407 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1408 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1409 			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1410 			return -EOPNOTSUPP;
1411 		}
1412 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1413 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1414 			       sizeof(key->ct_zone));
1415 	}
1416 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1417 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1418 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1419 			return -EOPNOTSUPP;
1420 		}
1421 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1422 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1423 			       sizeof(key->ct_mark));
1424 	}
1425 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1426 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1427 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1428 			return -EOPNOTSUPP;
1429 		}
1430 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1431 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1432 			       sizeof(key->ct_labels));
1433 	}
1434 
1435 	return 0;
1436 }
1437 
1438 static int fl_set_key(struct net *net, struct nlattr **tb,
1439 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1440 		      struct netlink_ext_ack *extack)
1441 {
1442 	__be16 ethertype;
1443 	int ret = 0;
1444 
1445 	if (tb[TCA_FLOWER_INDEV]) {
1446 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1447 		if (err < 0)
1448 			return err;
1449 		key->meta.ingress_ifindex = err;
1450 		mask->meta.ingress_ifindex = 0xffffffff;
1451 	}
1452 
1453 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1454 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1455 		       sizeof(key->eth.dst));
1456 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1457 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1458 		       sizeof(key->eth.src));
1459 
1460 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1461 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1462 
1463 		if (eth_type_vlan(ethertype)) {
1464 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1465 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1466 					&mask->vlan);
1467 
1468 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1469 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1470 				if (eth_type_vlan(ethertype)) {
1471 					fl_set_key_vlan(tb, ethertype,
1472 							TCA_FLOWER_KEY_CVLAN_ID,
1473 							TCA_FLOWER_KEY_CVLAN_PRIO,
1474 							&key->cvlan, &mask->cvlan);
1475 					fl_set_key_val(tb, &key->basic.n_proto,
1476 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1477 						       &mask->basic.n_proto,
1478 						       TCA_FLOWER_UNSPEC,
1479 						       sizeof(key->basic.n_proto));
1480 				} else {
1481 					key->basic.n_proto = ethertype;
1482 					mask->basic.n_proto = cpu_to_be16(~0);
1483 				}
1484 			}
1485 		} else {
1486 			key->basic.n_proto = ethertype;
1487 			mask->basic.n_proto = cpu_to_be16(~0);
1488 		}
1489 	}
1490 
1491 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1492 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1493 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1494 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1495 			       sizeof(key->basic.ip_proto));
1496 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1497 	}
1498 
1499 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1500 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1501 		mask->control.addr_type = ~0;
1502 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1503 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1504 			       sizeof(key->ipv4.src));
1505 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1506 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1507 			       sizeof(key->ipv4.dst));
1508 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1509 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1510 		mask->control.addr_type = ~0;
1511 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1512 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1513 			       sizeof(key->ipv6.src));
1514 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1515 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1516 			       sizeof(key->ipv6.dst));
1517 	}
1518 
1519 	if (key->basic.ip_proto == IPPROTO_TCP) {
1520 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1521 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1522 			       sizeof(key->tp.src));
1523 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1524 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1525 			       sizeof(key->tp.dst));
1526 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1527 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1528 			       sizeof(key->tcp.flags));
1529 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1530 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1531 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1532 			       sizeof(key->tp.src));
1533 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1534 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1535 			       sizeof(key->tp.dst));
1536 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1537 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1538 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1539 			       sizeof(key->tp.src));
1540 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1541 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1542 			       sizeof(key->tp.dst));
1543 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1544 		   key->basic.ip_proto == IPPROTO_ICMP) {
1545 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1546 			       &mask->icmp.type,
1547 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1548 			       sizeof(key->icmp.type));
1549 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1550 			       &mask->icmp.code,
1551 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1552 			       sizeof(key->icmp.code));
1553 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1554 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1555 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1556 			       &mask->icmp.type,
1557 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1558 			       sizeof(key->icmp.type));
1559 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1560 			       &mask->icmp.code,
1561 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1562 			       sizeof(key->icmp.code));
1563 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1564 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1565 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1566 		if (ret)
1567 			return ret;
1568 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1569 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1570 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1571 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1572 			       sizeof(key->arp.sip));
1573 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1574 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1575 			       sizeof(key->arp.tip));
1576 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1577 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1578 			       sizeof(key->arp.op));
1579 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1580 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1581 			       sizeof(key->arp.sha));
1582 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1583 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1584 			       sizeof(key->arp.tha));
1585 	}
1586 
1587 	if (key->basic.ip_proto == IPPROTO_TCP ||
1588 	    key->basic.ip_proto == IPPROTO_UDP ||
1589 	    key->basic.ip_proto == IPPROTO_SCTP) {
1590 		ret = fl_set_key_port_range(tb, key, mask, extack);
1591 		if (ret)
1592 			return ret;
1593 	}
1594 
1595 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1596 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1597 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1598 		mask->enc_control.addr_type = ~0;
1599 		fl_set_key_val(tb, &key->enc_ipv4.src,
1600 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1601 			       &mask->enc_ipv4.src,
1602 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1603 			       sizeof(key->enc_ipv4.src));
1604 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1605 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1606 			       &mask->enc_ipv4.dst,
1607 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1608 			       sizeof(key->enc_ipv4.dst));
1609 	}
1610 
1611 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1612 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1613 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1614 		mask->enc_control.addr_type = ~0;
1615 		fl_set_key_val(tb, &key->enc_ipv6.src,
1616 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1617 			       &mask->enc_ipv6.src,
1618 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1619 			       sizeof(key->enc_ipv6.src));
1620 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1621 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1622 			       &mask->enc_ipv6.dst,
1623 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1624 			       sizeof(key->enc_ipv6.dst));
1625 	}
1626 
1627 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1628 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1629 		       sizeof(key->enc_key_id.keyid));
1630 
1631 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1632 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1633 		       sizeof(key->enc_tp.src));
1634 
1635 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1636 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1637 		       sizeof(key->enc_tp.dst));
1638 
1639 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1640 
1641 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1642 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1643 		       sizeof(key->hash.hash));
1644 
1645 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1646 		ret = fl_set_enc_opt(tb, key, mask, extack);
1647 		if (ret)
1648 			return ret;
1649 	}
1650 
1651 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1652 	if (ret)
1653 		return ret;
1654 
1655 	if (tb[TCA_FLOWER_KEY_FLAGS])
1656 		ret = fl_set_key_flags(tb, &key->control.flags,
1657 				       &mask->control.flags, extack);
1658 
1659 	return ret;
1660 }
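
/* The transport attributes parsed above are deliberately protocol-specific
 * (TCA_FLOWER_KEY_TCP_*, _UDP_*, _SCTP_*) even though they all land in the
 * shared key->tp fields: which set applies is implied by the ip_proto match.
 * A minimal userspace sketch of the resulting semantics, assuming iproute2's
 * flower syntax (device name illustrative only):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * matches TCP traffic only; matching the same port for UDP would require
 * ip_proto udp, which selects the _UDP_ attributes instead.
 */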
1661 
1662 static void fl_mask_copy(struct fl_flow_mask *dst,
1663 			 struct fl_flow_mask *src)
1664 {
1665 	const void *psrc = fl_key_get_start(&src->key, src);
1666 	void *pdst = fl_key_get_start(&dst->key, src);
1667 
1668 	memcpy(pdst, psrc, fl_mask_range(src));
1669 	dst->range = src->range;
1670 }
1671 
1672 static const struct rhashtable_params fl_ht_params = {
1673 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1674 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1675 	.automatic_shrinking = true,
1676 };
1677 
1678 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1679 {
1680 	mask->filter_ht_params = fl_ht_params;
1681 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1682 	mask->filter_ht_params.key_offset += mask->range.start;
1683 
1684 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1685 }
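
/* A worked sketch of the derivation above, with illustrative offsets only:
 * suppose fl_mask_update_range() found the first masked byte of struct
 * fl_flow_key at offset 16 and the last at offset 48, i.e. range.start = 16
 * and range.end = 48. Then:
 *
 *   key_len    = fl_mask_range(mask)                  = 48 - 16 = 32
 *   key_offset = offsetof(struct cls_fl_filter, mkey) + 16
 *
 * so each per-mask hashtable hashes only the bytes of ->mkey that this mask
 * can actually set, and lookup cost scales with the mask width rather than
 * with sizeof(struct fl_flow_key).
 */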
1686 
1687 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1688 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1689 
1690 #define FL_KEY_IS_MASKED(mask, member)						\
1691 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1692 		   0, FL_KEY_MEMBER_SIZE(member))				\
1693 
1694 #define FL_KEY_SET(keys, cnt, id, member)					\
1695 	do {									\
1696 		keys[cnt].key_id = id;						\
1697 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1698 		cnt++;								\
1699 	} while (0)
1700 
1701 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1702 	do {									\
1703 		if (FL_KEY_IS_MASKED(mask, member))				\
1704 			FL_KEY_SET(keys, cnt, id, member);			\
1705 	} while (0)
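
/* For illustration, FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 * FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4) expands to roughly:
 *
 *   if (memchr_inv((char *)mask + offsetof(struct fl_flow_key, ipv4), 0,
 *                  sizeof_field(struct fl_flow_key, ipv4))) {
 *           keys[cnt].key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 *           keys[cnt].offset = offsetof(struct fl_flow_key, ipv4);
 *           cnt++;
 *   }
 *
 * i.e. a dissector key is advertised only when at least one bit of the
 * corresponding member is set in the mask.
 */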
1706 
1707 static void fl_init_dissector(struct flow_dissector *dissector,
1708 			      struct fl_flow_key *mask)
1709 {
1710 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1711 	size_t cnt = 0;
1712 
1713 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1714 			     FLOW_DISSECTOR_KEY_META, meta);
1715 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1716 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1717 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1718 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1719 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1720 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1721 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1722 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1723 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1724 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1725 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1726 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1727 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1728 			     FLOW_DISSECTOR_KEY_IP, ip);
1729 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1730 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1731 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1732 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1733 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1734 			     FLOW_DISSECTOR_KEY_ARP, arp);
1735 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1736 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1737 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1738 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1739 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1740 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1741 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1742 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1743 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1744 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1745 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1746 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1747 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1748 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1749 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1750 			   enc_control);
1751 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1752 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1753 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1754 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1755 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1756 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1757 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1758 			     FLOW_DISSECTOR_KEY_CT, ct);
1759 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1760 			     FLOW_DISSECTOR_KEY_HASH, hash);
1761 
1762 	skb_flow_dissector_init(dissector, keys, cnt);
1763 }
1764 
1765 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1766 					       struct fl_flow_mask *mask)
1767 {
1768 	struct fl_flow_mask *newmask;
1769 	int err;
1770 
1771 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1772 	if (!newmask)
1773 		return ERR_PTR(-ENOMEM);
1774 
1775 	fl_mask_copy(newmask, mask);
1776 
1777 	if ((newmask->key.tp_range.tp_min.dst &&
1778 	     newmask->key.tp_range.tp_max.dst) ||
1779 	    (newmask->key.tp_range.tp_min.src &&
1780 	     newmask->key.tp_range.tp_max.src))
1781 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1782 
1783 	err = fl_init_mask_hashtable(newmask);
1784 	if (err)
1785 		goto errout_free;
1786 
1787 	fl_init_dissector(&newmask->dissector, &newmask->key);
1788 
1789 	INIT_LIST_HEAD_RCU(&newmask->filters);
1790 
1791 	refcount_set(&newmask->refcnt, 1);
1792 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1793 				      &newmask->ht_node, mask_ht_params);
1794 	if (err)
1795 		goto errout_destroy;
1796 
1797 	spin_lock(&head->masks_lock);
1798 	list_add_tail_rcu(&newmask->list, &head->masks);
1799 	spin_unlock(&head->masks_lock);
1800 
1801 	return newmask;
1802 
1803 errout_destroy:
1804 	rhashtable_destroy(&newmask->ht);
1805 errout_free:
1806 	kfree(newmask);
1807 
1808 	return ERR_PTR(err);
1809 }
1810 
1811 static int fl_check_assign_mask(struct cls_fl_head *head,
1812 				struct cls_fl_filter *fnew,
1813 				struct cls_fl_filter *fold,
1814 				struct fl_flow_mask *mask)
1815 {
1816 	struct fl_flow_mask *newmask;
1817 	int ret = 0;
1818 
1819 	rcu_read_lock();
1820 
1821 	/* Insert the mask as a temporary node to prevent concurrent creation
1822 	 * of a mask with the same key. Any concurrent lookup with the same
1823 	 * key will return -EAGAIN because the mask's refcnt is zero.
1824 	 */
1825 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1826 						       &mask->ht_node,
1827 						       mask_ht_params);
1828 	if (!fnew->mask) {
1829 		rcu_read_unlock();
1830 
1831 		if (fold) {
1832 			ret = -EINVAL;
1833 			goto errout_cleanup;
1834 		}
1835 
1836 		newmask = fl_create_new_mask(head, mask);
1837 		if (IS_ERR(newmask)) {
1838 			ret = PTR_ERR(newmask);
1839 			goto errout_cleanup;
1840 		}
1841 
1842 		fnew->mask = newmask;
1843 		return 0;
1844 	} else if (IS_ERR(fnew->mask)) {
1845 		ret = PTR_ERR(fnew->mask);
1846 	} else if (fold && fold->mask != fnew->mask) {
1847 		ret = -EINVAL;
1848 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1849 		/* Mask was deleted concurrently, try again */
1850 		ret = -EAGAIN;
1851 	}
1852 	rcu_read_unlock();
1853 	return ret;
1854 
1855 errout_cleanup:
1856 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1857 			       mask_ht_params);
1858 	return ret;
1859 }
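
/* The lookup-or-insert above has three outcomes; as a sketch of the
 * concurrency protocol (return values as in the function body):
 *
 *   NULL returned       -> our temporary node went in; promote it with
 *                          fl_create_new_mask(), or fail with -EINVAL when
 *                          replacing, since an old filter implies the mask
 *                          must already exist.
 *   existing mask found -> share it, but only if refcount_inc_not_zero()
 *                          succeeds; a zero refcnt means the mask is being
 *                          torn down concurrently and the caller must retry
 *                          (-EAGAIN).
 *   fold->mask mismatch -> -EINVAL, since replacing a filter may not change
 *                          its mask.
 */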
1860 
1861 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1862 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1863 			unsigned long base, struct nlattr **tb,
1864 			struct nlattr *est, bool ovr,
1865 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1866 			struct netlink_ext_ack *extack)
1867 {
1868 	int err;
1869 
1870 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1871 				extack);
1872 	if (err < 0)
1873 		return err;
1874 
1875 	if (tb[TCA_FLOWER_CLASSID]) {
1876 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1877 		if (!rtnl_held)
1878 			rtnl_lock();
1879 		tcf_bind_filter(tp, &f->res, base);
1880 		if (!rtnl_held)
1881 			rtnl_unlock();
1882 	}
1883 
1884 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1885 	if (err)
1886 		return err;
1887 
1888 	fl_mask_update_range(mask);
1889 	fl_set_masked_key(&f->mkey, &f->key, mask);
1890 
1891 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1892 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1893 		return -EINVAL;
1894 	}
1895 
1896 	return 0;
1897 }
1898 
1899 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1900 			       struct cls_fl_filter *fold,
1901 			       bool *in_ht)
1902 {
1903 	struct fl_flow_mask *mask = fnew->mask;
1904 	int err;
1905 
1906 	err = rhashtable_lookup_insert_fast(&mask->ht,
1907 					    &fnew->ht_node,
1908 					    mask->filter_ht_params);
1909 	if (err) {
1910 		*in_ht = false;
1911 		/* It is okay if a filter with the same key already exists
1912 		 * when overwriting.
1913 		 */
1914 		return fold && err == -EEXIST ? 0 : err;
1915 	}
1916 
1917 	*in_ht = true;
1918 	return 0;
1919 }
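
/* A sketch of the -EEXIST tolerance above: when fnew replaces fold under the
 * same mask with the same key, fold still occupies the hashtable slot, so
 * the uniqueness-checking insert fails with -EEXIST. That is expected;
 * fl_change() completes the swap later under tp->lock with the plain
 * rhashtable_insert_fast(), which tolerates the duplicate until fold's node
 * is removed.
 */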
1920 
1921 static int fl_change(struct net *net, struct sk_buff *in_skb,
1922 		     struct tcf_proto *tp, unsigned long base,
1923 		     u32 handle, struct nlattr **tca,
1924 		     void **arg, bool ovr, bool rtnl_held,
1925 		     struct netlink_ext_ack *extack)
1926 {
1927 	struct cls_fl_head *head = fl_head_dereference(tp);
1928 	struct cls_fl_filter *fold = *arg;
1929 	struct cls_fl_filter *fnew;
1930 	struct fl_flow_mask *mask;
1931 	struct nlattr **tb;
1932 	bool in_ht;
1933 	int err;
1934 
1935 	if (!tca[TCA_OPTIONS]) {
1936 		err = -EINVAL;
1937 		goto errout_fold;
1938 	}
1939 
1940 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1941 	if (!mask) {
1942 		err = -ENOBUFS;
1943 		goto errout_fold;
1944 	}
1945 
1946 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1947 	if (!tb) {
1948 		err = -ENOBUFS;
1949 		goto errout_mask_alloc;
1950 	}
1951 
1952 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1953 					  tca[TCA_OPTIONS], fl_policy, NULL);
1954 	if (err < 0)
1955 		goto errout_tb;
1956 
1957 	if (fold && handle && fold->handle != handle) {
1958 		err = -EINVAL;
1959 		goto errout_tb;
1960 	}
1961 
1962 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1963 	if (!fnew) {
1964 		err = -ENOBUFS;
1965 		goto errout_tb;
1966 	}
1967 	INIT_LIST_HEAD(&fnew->hw_list);
1968 	refcount_set(&fnew->refcnt, 1);
1969 
1970 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1971 	if (err < 0)
1972 		goto errout;
1973 
1974 	if (tb[TCA_FLOWER_FLAGS]) {
1975 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1976 
1977 		if (!tc_flags_valid(fnew->flags)) {
1978 			err = -EINVAL;
1979 			goto errout;
1980 		}
1981 	}
1982 
1983 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1984 			   tp->chain->tmplt_priv, rtnl_held, extack);
1985 	if (err)
1986 		goto errout;
1987 
1988 	err = fl_check_assign_mask(head, fnew, fold, mask);
1989 	if (err)
1990 		goto errout;
1991 
1992 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1993 	if (err)
1994 		goto errout_mask;
1995 
1996 	if (!tc_skip_hw(fnew->flags)) {
1997 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1998 		if (err)
1999 			goto errout_ht;
2000 	}
2001 
2002 	if (!tc_in_hw(fnew->flags))
2003 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2004 
2005 	spin_lock(&tp->lock);
2006 
2007 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2008 	 * up the proto again or create a new one, if necessary.
2009 	 */
2010 	if (tp->deleting) {
2011 		err = -EAGAIN;
2012 		goto errout_hw;
2013 	}
2014 
2015 	if (fold) {
2016 		/* Fold filter was deleted concurrently. Retry lookup. */
2017 		if (fold->deleted) {
2018 			err = -EAGAIN;
2019 			goto errout_hw;
2020 		}
2021 
2022 		fnew->handle = handle;
2023 
2024 		if (!in_ht) {
2025 			struct rhashtable_params params =
2026 				fnew->mask->filter_ht_params;
2027 
2028 			err = rhashtable_insert_fast(&fnew->mask->ht,
2029 						     &fnew->ht_node,
2030 						     params);
2031 			if (err)
2032 				goto errout_hw;
2033 			in_ht = true;
2034 		}
2035 
2036 		refcount_inc(&fnew->refcnt);
2037 		rhashtable_remove_fast(&fold->mask->ht,
2038 				       &fold->ht_node,
2039 				       fold->mask->filter_ht_params);
2040 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2041 		list_replace_rcu(&fold->list, &fnew->list);
2042 		fold->deleted = true;
2043 
2044 		spin_unlock(&tp->lock);
2045 
2046 		fl_mask_put(head, fold->mask);
2047 		if (!tc_skip_hw(fold->flags))
2048 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2049 		tcf_unbind_filter(tp, &fold->res);
2050 		/* The caller holds a reference to fold, so its refcnt is
2051 		 * always > 0 after this.
2052 		 */
2053 		refcount_dec(&fold->refcnt);
2054 		__fl_put(fold);
2055 	} else {
2056 		if (handle) {
2057 			/* the user specified a handle that does not yet exist */
2058 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2059 					    handle, GFP_ATOMIC);
2060 
2061 			/* A filter with the specified handle was concurrently
2062 			 * inserted after the initial check in cls_api. This is not
2063 			 * necessarily an error if NLM_F_EXCL is not set in the
2064 			 * message flags. Returning -EAGAIN will cause cls_api to
2065 			 * try to update the concurrently inserted rule.
2066 			 */
2067 			if (err == -ENOSPC)
2068 				err = -EAGAIN;
2069 		} else {
2070 			handle = 1;
2071 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2072 					    INT_MAX, GFP_ATOMIC);
2073 		}
2074 		if (err)
2075 			goto errout_hw;
2076 
2077 		refcount_inc(&fnew->refcnt);
2078 		fnew->handle = handle;
2079 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2080 		spin_unlock(&tp->lock);
2081 	}
2082 
2083 	*arg = fnew;
2084 
2085 	kfree(tb);
2086 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2087 	return 0;
2088 
2089 errout_ht:
2090 	spin_lock(&tp->lock);
2091 errout_hw:
2092 	fnew->deleted = true;
2093 	spin_unlock(&tp->lock);
2094 	if (!tc_skip_hw(fnew->flags))
2095 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2096 	if (in_ht)
2097 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2098 				       fnew->mask->filter_ht_params);
2099 errout_mask:
2100 	fl_mask_put(head, fnew->mask);
2101 errout:
2102 	__fl_put(fnew);
2103 errout_tb:
2104 	kfree(tb);
2105 errout_mask_alloc:
2106 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2107 errout_fold:
2108 	if (fold)
2109 		__fl_put(fold);
2110 	return err;
2111 }
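
/* A hedged userspace sketch of the two handle paths above, assuming
 * iproute2 syntax (device, priority and handle values illustrative only):
 *
 *   # create: the kernel allocates the next free handle in [1, INT_MAX]
 *   tc filter add dev eth0 ingress prio 1 protocol ip \
 *           flower ip_proto udp action drop
 *
 *   # replace: the handle must name the existing filter; if that filter is
 *   # deleted concurrently, -EAGAIN makes cls_api retry the operation
 *   tc filter replace dev eth0 ingress handle 0x1 prio 1 protocol ip \
 *           flower ip_proto udp action pass
 */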
2112 
2113 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2114 		     bool rtnl_held, struct netlink_ext_ack *extack)
2115 {
2116 	struct cls_fl_head *head = fl_head_dereference(tp);
2117 	struct cls_fl_filter *f = arg;
2118 	bool last_on_mask;
2119 	int err = 0;
2120 
2121 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2122 	*last = list_empty(&head->masks);
2123 	__fl_put(f);
2124 
2125 	return err;
2126 }
2127 
2128 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2129 		    bool rtnl_held)
2130 {
2131 	struct cls_fl_head *head = fl_head_dereference(tp);
2132 	unsigned long id = arg->cookie, tmp;
2133 	struct cls_fl_filter *f;
2134 
2135 	arg->count = arg->skip;
2136 
2137 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2138 		/* don't return filters that are being deleted */
2139 		if (!refcount_inc_not_zero(&f->refcnt))
2140 			continue;
2141 		if (arg->fn(tp, f, arg) < 0) {
2142 			__fl_put(f);
2143 			arg->stop = 1;
2144 			break;
2145 		}
2146 		__fl_put(f);
2147 		arg->count++;
2148 	}
2149 	arg->cookie = id;
2150 }
2151 
2152 static struct cls_fl_filter *
2153 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2154 {
2155 	struct cls_fl_head *head = fl_head_dereference(tp);
2156 
2157 	spin_lock(&tp->lock);
2158 	if (list_empty(&head->hw_filters)) {
2159 		spin_unlock(&tp->lock);
2160 		return NULL;
2161 	}
2162 
2163 	if (!f)
2164 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2165 			       hw_list);
2166 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2167 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2168 			spin_unlock(&tp->lock);
2169 			return f;
2170 		}
2171 	}
2172 
2173 	spin_unlock(&tp->lock);
2174 	return NULL;
2175 }
2176 
2177 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2178 			void *cb_priv, struct netlink_ext_ack *extack)
2179 {
2180 	struct tcf_block *block = tp->chain->block;
2181 	struct flow_cls_offload cls_flower = {};
2182 	struct cls_fl_filter *f = NULL;
2183 	int err;
2184 
2185 	/* The hw_filters list can only be changed by hw offload functions
2186 	 * after obtaining the rtnl lock. Make sure it is not changed while
2187 	 * reoffload is iterating over it.
2188 	 */
2189 	ASSERT_RTNL();
2190 
2191 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2192 		cls_flower.rule =
2193 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2194 		if (!cls_flower.rule) {
2195 			__fl_put(f);
2196 			return -ENOMEM;
2197 		}
2198 
2199 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2200 					   extack);
2201 		cls_flower.command = add ?
2202 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2203 		cls_flower.cookie = (unsigned long)f;
2204 		cls_flower.rule->match.dissector = &f->mask->dissector;
2205 		cls_flower.rule->match.mask = &f->mask->key;
2206 		cls_flower.rule->match.key = &f->mkey;
2207 
2208 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2209 		if (err) {
2210 			kfree(cls_flower.rule);
2211 			if (tc_skip_sw(f->flags)) {
2212 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2213 				__fl_put(f);
2214 				return err;
2215 			}
2216 			goto next_flow;
2217 		}
2218 
2219 		cls_flower.classid = f->res.classid;
2220 
2221 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2222 					    TC_SETUP_CLSFLOWER, &cls_flower,
2223 					    cb_priv, &f->flags,
2224 					    &f->in_hw_count);
2225 		tc_cleanup_flow_action(&cls_flower.rule->action);
2226 		kfree(cls_flower.rule);
2227 
2228 		if (err) {
2229 			__fl_put(f);
2230 			return err;
2231 		}
2232 next_flow:
2233 		__fl_put(f);
2234 	}
2235 
2236 	return 0;
2237 }
2238 
2239 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2240 {
2241 	struct flow_cls_offload *cls_flower = type_data;
2242 	struct cls_fl_filter *f =
2243 		(struct cls_fl_filter *) cls_flower->cookie;
2244 	struct cls_fl_head *head = fl_head_dereference(tp);
2245 
2246 	spin_lock(&tp->lock);
2247 	list_add(&f->hw_list, &head->hw_filters);
2248 	spin_unlock(&tp->lock);
2249 }
2250 
2251 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2252 {
2253 	struct flow_cls_offload *cls_flower = type_data;
2254 	struct cls_fl_filter *f =
2255 		(struct cls_fl_filter *) cls_flower->cookie;
2256 
2257 	spin_lock(&tp->lock);
2258 	if (!list_empty(&f->hw_list))
2259 		list_del_init(&f->hw_list);
2260 	spin_unlock(&tp->lock);
2261 }
2262 
2263 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2264 			      struct fl_flow_tmplt *tmplt)
2265 {
2266 	struct flow_cls_offload cls_flower = {};
2267 	struct tcf_block *block = chain->block;
2268 
2269 	cls_flower.rule = flow_rule_alloc(0);
2270 	if (!cls_flower.rule)
2271 		return -ENOMEM;
2272 
2273 	cls_flower.common.chain_index = chain->index;
2274 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2275 	cls_flower.cookie = (unsigned long) tmplt;
2276 	cls_flower.rule->match.dissector = &tmplt->dissector;
2277 	cls_flower.rule->match.mask = &tmplt->mask;
2278 	cls_flower.rule->match.key = &tmplt->dummy_key;
2279 
2280 	/* We don't care if any of the drivers fails to handle this call;
2281 	 * it serves merely as a hint to them.
2282 	 */
2283 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2284 	kfree(cls_flower.rule);
2285 
2286 	return 0;
2287 }
2288 
2289 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2290 				struct fl_flow_tmplt *tmplt)
2291 {
2292 	struct flow_cls_offload cls_flower = {};
2293 	struct tcf_block *block = chain->block;
2294 
2295 	cls_flower.common.chain_index = chain->index;
2296 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2297 	cls_flower.cookie = (unsigned long) tmplt;
2298 
2299 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2300 }
2301 
2302 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2303 			     struct nlattr **tca,
2304 			     struct netlink_ext_ack *extack)
2305 {
2306 	struct fl_flow_tmplt *tmplt;
2307 	struct nlattr **tb;
2308 	int err;
2309 
2310 	if (!tca[TCA_OPTIONS])
2311 		return ERR_PTR(-EINVAL);
2312 
2313 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2314 	if (!tb)
2315 		return ERR_PTR(-ENOBUFS);
2316 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2317 					  tca[TCA_OPTIONS], fl_policy, NULL);
2318 	if (err)
2319 		goto errout_tb;
2320 
2321 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2322 	if (!tmplt) {
2323 		err = -ENOMEM;
2324 		goto errout_tb;
2325 	}
2326 	tmplt->chain = chain;
2327 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2328 	if (err)
2329 		goto errout_tmplt;
2330 
2331 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2332 
2333 	err = fl_hw_create_tmplt(chain, tmplt);
2334 	if (err)
2335 		goto errout_tmplt;
2336 
2337 	kfree(tb);
2338 	return tmplt;
2339 
2340 errout_tmplt:
2341 	kfree(tmplt);
2342 errout_tb:
2343 	kfree(tb);
2344 	return ERR_PTR(err);
2345 }
2346 
2347 static void fl_tmplt_destroy(void *tmplt_priv)
2348 {
2349 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2350 
2351 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2352 	kfree(tmplt);
2353 }
2354 
2355 static int fl_dump_key_val(struct sk_buff *skb,
2356 			   void *val, int val_type,
2357 			   void *mask, int mask_type, int len)
2358 {
2359 	int err;
2360 
2361 	if (!memchr_inv(mask, 0, len))
2362 		return 0;
2363 	err = nla_put(skb, val_type, len, val);
2364 	if (err)
2365 		return err;
2366 	if (mask_type != TCA_FLOWER_UNSPEC) {
2367 		err = nla_put(skb, mask_type, len, mask);
2368 		if (err)
2369 			return err;
2370 	}
2371 	return 0;
2372 }
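
/* A minimal sketch of the dump convention, assuming a caller that passes an
 * all-zero mask member (values illustrative only):
 *
 *   __be16 val = htons(80), msk = 0;
 *   fl_dump_key_val(skb, &val, TCA_FLOWER_KEY_TCP_DST,
 *                   &msk, TCA_FLOWER_KEY_TCP_DST_MASK, sizeof(val));
 *
 * emits nothing and returns 0: attributes reach the skb only when at least
 * one mask bit is set, keeping dumps limited to the keys the filter actually
 * matches on. Passing TCA_FLOWER_UNSPEC as mask_type suppresses only the
 * mask attribute, as done for keys whose presence already implies the mask.
 */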
2373 
2374 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2375 				  struct fl_flow_key *mask)
2376 {
2377 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2378 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2379 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2380 			    sizeof(key->tp_range.tp_min.dst)) ||
2381 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2382 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2383 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2384 			    sizeof(key->tp_range.tp_max.dst)) ||
2385 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2386 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2387 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2388 			    sizeof(key->tp_range.tp_min.src)) ||
2389 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2390 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2391 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2392 			    sizeof(key->tp_range.tp_max.src)))
2393 		return -1;
2394 
2395 	return 0;
2396 }
2397 
2398 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2399 				    struct flow_dissector_key_mpls *mpls_key,
2400 				    struct flow_dissector_key_mpls *mpls_mask,
2401 				    u8 lse_index)
2402 {
2403 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2404 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2405 	int err;
2406 
2407 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2408 			 lse_index + 1);
2409 	if (err)
2410 		return err;
2411 
2412 	if (lse_mask->mpls_ttl) {
2413 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2414 				 lse_key->mpls_ttl);
2415 		if (err)
2416 			return err;
2417 	}
2418 	if (lse_mask->mpls_bos) {
2419 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2420 				 lse_key->mpls_bos);
2421 		if (err)
2422 			return err;
2423 	}
2424 	if (lse_mask->mpls_tc) {
2425 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2426 				 lse_key->mpls_tc);
2427 		if (err)
2428 			return err;
2429 	}
2430 	if (lse_mask->mpls_label) {
2431 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2432 				  lse_key->mpls_label);
2433 		if (err)
2434 			return err;
2435 	}
2436 
2437 	return 0;
2438 }
2439 
2440 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2441 				 struct flow_dissector_key_mpls *mpls_key,
2442 				 struct flow_dissector_key_mpls *mpls_mask)
2443 {
2444 	struct nlattr *opts;
2445 	struct nlattr *lse;
2446 	u8 lse_index;
2447 	int err;
2448 
2449 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2450 	if (!opts)
2451 		return -EMSGSIZE;
2452 
2453 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2454 		if (!(mpls_mask->used_lses & 1 << lse_index))
2455 			continue;
2456 
2457 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2458 		if (!lse) {
2459 			err = -EMSGSIZE;
2460 			goto err_opts;
2461 		}
2462 
2463 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2464 					       lse_index);
2465 		if (err)
2466 			goto err_opts_lse;
2467 		nla_nest_end(skb, lse);
2468 	}
2469 	nla_nest_end(skb, opts);
2470 
2471 	return 0;
2472 
2473 err_opts_lse:
2474 	nla_nest_cancel(skb, lse);
2475 err_opts:
2476 	nla_nest_cancel(skb, opts);
2477 
2478 	return err;
2479 }
2480 
2481 static int fl_dump_key_mpls(struct sk_buff *skb,
2482 			    struct flow_dissector_key_mpls *mpls_key,
2483 			    struct flow_dissector_key_mpls *mpls_mask)
2484 {
2485 	struct flow_dissector_mpls_lse *lse_mask;
2486 	struct flow_dissector_mpls_lse *lse_key;
2487 	int err;
2488 
2489 	if (!mpls_mask->used_lses)
2490 		return 0;
2491 
2492 	lse_mask = &mpls_mask->ls[0];
2493 	lse_key = &mpls_key->ls[0];
2494 
2495 	/* For backward compatibility, don't use the MPLS nested attributes if
2496 	 * the rule can be expressed using the old attributes.
2497 	 */
2498 	if (mpls_mask->used_lses & ~1 ||
2499 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2500 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2501 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2502 
2503 	if (lse_mask->mpls_ttl) {
2504 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2505 				 lse_key->mpls_ttl);
2506 		if (err)
2507 			return err;
2508 	}
2509 	if (lse_mask->mpls_tc) {
2510 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2511 				 lse_key->mpls_tc);
2512 		if (err)
2513 			return err;
2514 	}
2515 	if (lse_mask->mpls_label) {
2516 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2517 				  lse_key->mpls_label);
2518 		if (err)
2519 			return err;
2520 	}
2521 	if (lse_mask->mpls_bos) {
2522 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2523 				 lse_key->mpls_bos);
2524 		if (err)
2525 			return err;
2526 	}
2527 	return 0;
2528 }
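
/* Concretely: a rule whose MPLS mask touches only the first label stack
 * entry (e.g. ttl alone) is dumped with the legacy flat attributes such as
 * TCA_FLOWER_KEY_MPLS_TTL, whereas a mask that uses a deeper LSE
 * (used_lses & ~1) or an empty first-LSE mask falls back to the nested
 * TCA_FLOWER_KEY_MPLS_OPTS encoding produced above.
 */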
2529 
2530 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2531 			  struct flow_dissector_key_ip *key,
2532 			  struct flow_dissector_key_ip *mask)
2533 {
2534 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2535 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2536 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2537 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2538 
2539 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2540 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2541 		return -1;
2542 
2543 	return 0;
2544 }
2545 
2546 static int fl_dump_key_vlan(struct sk_buff *skb,
2547 			    int vlan_id_key, int vlan_prio_key,
2548 			    struct flow_dissector_key_vlan *vlan_key,
2549 			    struct flow_dissector_key_vlan *vlan_mask)
2550 {
2551 	int err;
2552 
2553 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2554 		return 0;
2555 	if (vlan_mask->vlan_id) {
2556 		err = nla_put_u16(skb, vlan_id_key,
2557 				  vlan_key->vlan_id);
2558 		if (err)
2559 			return err;
2560 	}
2561 	if (vlan_mask->vlan_priority) {
2562 		err = nla_put_u8(skb, vlan_prio_key,
2563 				 vlan_key->vlan_priority);
2564 		if (err)
2565 			return err;
2566 	}
2567 	return 0;
2568 }
2569 
2570 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2571 			    u32 *flower_key, u32 *flower_mask,
2572 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2573 {
2574 	if (dissector_mask & dissector_flag_bit) {
2575 		*flower_mask |= flower_flag_bit;
2576 		if (dissector_key & dissector_flag_bit)
2577 			*flower_key |= flower_flag_bit;
2578 	}
2579 }
2580 
2581 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2582 {
2583 	u32 key, mask;
2584 	__be32 _key, _mask;
2585 	int err;
2586 
2587 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2588 		return 0;
2589 
2590 	key = 0;
2591 	mask = 0;
2592 
2593 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2594 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2595 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2596 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2597 			FLOW_DIS_FIRST_FRAG);
2598 
2599 	_key = cpu_to_be32(key);
2600 	_mask = cpu_to_be32(mask);
2601 
2602 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2603 	if (err)
2604 		return err;
2605 
2606 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2607 }
2608 
2609 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2610 				  struct flow_dissector_key_enc_opts *enc_opts)
2611 {
2612 	struct geneve_opt *opt;
2613 	struct nlattr *nest;
2614 	int opt_off = 0;
2615 
2616 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2617 	if (!nest)
2618 		goto nla_put_failure;
2619 
2620 	while (enc_opts->len > opt_off) {
2621 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2622 
2623 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2624 				 opt->opt_class))
2625 			goto nla_put_failure;
2626 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2627 			       opt->type))
2628 			goto nla_put_failure;
2629 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2630 			    opt->length * 4, opt->opt_data))
2631 			goto nla_put_failure;
2632 
2633 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2634 	}
2635 	nla_nest_end(skb, nest);
2636 	return 0;
2637 
2638 nla_put_failure:
2639 	nla_nest_cancel(skb, nest);
2640 	return -EMSGSIZE;
2641 }
2642 
2643 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2644 				 struct flow_dissector_key_enc_opts *enc_opts)
2645 {
2646 	struct vxlan_metadata *md;
2647 	struct nlattr *nest;
2648 
2649 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2650 	if (!nest)
2651 		goto nla_put_failure;
2652 
2653 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2654 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2655 		goto nla_put_failure;
2656 
2657 	nla_nest_end(skb, nest);
2658 	return 0;
2659 
2660 nla_put_failure:
2661 	nla_nest_cancel(skb, nest);
2662 	return -EMSGSIZE;
2663 }
2664 
2665 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2666 				  struct flow_dissector_key_enc_opts *enc_opts)
2667 {
2668 	struct erspan_metadata *md;
2669 	struct nlattr *nest;
2670 
2671 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2672 	if (!nest)
2673 		goto nla_put_failure;
2674 
2675 	md = (struct erspan_metadata *)&enc_opts->data[0];
2676 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2677 		goto nla_put_failure;
2678 
2679 	if (md->version == 1 &&
2680 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2681 		goto nla_put_failure;
2682 
2683 	if (md->version == 2 &&
2684 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2685 			md->u.md2.dir) ||
2686 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2687 			get_hwid(&md->u.md2))))
2688 		goto nla_put_failure;
2689 
2690 	nla_nest_end(skb, nest);
2691 	return 0;
2692 
2693 nla_put_failure:
2694 	nla_nest_cancel(skb, nest);
2695 	return -EMSGSIZE;
2696 }
2697 
2698 static int fl_dump_key_ct(struct sk_buff *skb,
2699 			  struct flow_dissector_key_ct *key,
2700 			  struct flow_dissector_key_ct *mask)
2701 {
2702 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2703 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2704 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2705 			    sizeof(key->ct_state)))
2706 		goto nla_put_failure;
2707 
2708 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2709 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2710 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2711 			    sizeof(key->ct_zone)))
2712 		goto nla_put_failure;
2713 
2714 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2715 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2716 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2717 			    sizeof(key->ct_mark)))
2718 		goto nla_put_failure;
2719 
2720 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2721 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2722 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2723 			    sizeof(key->ct_labels)))
2724 		goto nla_put_failure;
2725 
2726 	return 0;
2727 
2728 nla_put_failure:
2729 	return -EMSGSIZE;
2730 }
2731 
2732 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2733 			       struct flow_dissector_key_enc_opts *enc_opts)
2734 {
2735 	struct nlattr *nest;
2736 	int err;
2737 
2738 	if (!enc_opts->len)
2739 		return 0;
2740 
2741 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2742 	if (!nest)
2743 		goto nla_put_failure;
2744 
2745 	switch (enc_opts->dst_opt_type) {
2746 	case TUNNEL_GENEVE_OPT:
2747 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2748 		if (err)
2749 			goto nla_put_failure;
2750 		break;
2751 	case TUNNEL_VXLAN_OPT:
2752 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2753 		if (err)
2754 			goto nla_put_failure;
2755 		break;
2756 	case TUNNEL_ERSPAN_OPT:
2757 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2758 		if (err)
2759 			goto nla_put_failure;
2760 		break;
2761 	default:
2762 		goto nla_put_failure;
2763 	}
2764 	nla_nest_end(skb, nest);
2765 	return 0;
2766 
2767 nla_put_failure:
2768 	nla_nest_cancel(skb, nest);
2769 	return -EMSGSIZE;
2770 }
2771 
2772 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2773 			       struct flow_dissector_key_enc_opts *key_opts,
2774 			       struct flow_dissector_key_enc_opts *msk_opts)
2775 {
2776 	int err;
2777 
2778 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2779 	if (err)
2780 		return err;
2781 
2782 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2783 }
2784 
2785 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2786 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2787 {
2788 	if (mask->meta.ingress_ifindex) {
2789 		struct net_device *dev;
2790 
2791 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2792 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2793 			goto nla_put_failure;
2794 	}
2795 
2796 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2797 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2798 			    sizeof(key->eth.dst)) ||
2799 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2800 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2801 			    sizeof(key->eth.src)) ||
2802 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2803 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2804 			    sizeof(key->basic.n_proto)))
2805 		goto nla_put_failure;
2806 
2807 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2808 		goto nla_put_failure;
2809 
2810 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2811 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2812 		goto nla_put_failure;
2813 
2814 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2815 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2816 			     &key->cvlan, &mask->cvlan) ||
2817 	    (mask->cvlan.vlan_tpid &&
2818 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2819 			  key->cvlan.vlan_tpid)))
2820 		goto nla_put_failure;
2821 
2822 	if (mask->basic.n_proto) {
2823 		if (mask->cvlan.vlan_tpid) {
2824 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2825 					 key->basic.n_proto))
2826 				goto nla_put_failure;
2827 		} else if (mask->vlan.vlan_tpid) {
2828 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2829 					 key->basic.n_proto))
2830 				goto nla_put_failure;
2831 		}
2832 	}
2833 
2834 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2835 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2836 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2837 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2838 			    sizeof(key->basic.ip_proto)) ||
2839 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2840 		goto nla_put_failure;
2841 
2842 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2843 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2844 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2845 			     sizeof(key->ipv4.src)) ||
2846 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2847 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2848 			     sizeof(key->ipv4.dst))))
2849 		goto nla_put_failure;
2850 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2851 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2852 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2853 				  sizeof(key->ipv6.src)) ||
2854 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2855 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2856 				  sizeof(key->ipv6.dst))))
2857 		goto nla_put_failure;
2858 
2859 	if (key->basic.ip_proto == IPPROTO_TCP &&
2860 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2861 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2862 			     sizeof(key->tp.src)) ||
2863 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2864 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2865 			     sizeof(key->tp.dst)) ||
2866 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2867 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2868 			     sizeof(key->tcp.flags))))
2869 		goto nla_put_failure;
2870 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2871 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2872 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2873 				  sizeof(key->tp.src)) ||
2874 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2875 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2876 				  sizeof(key->tp.dst))))
2877 		goto nla_put_failure;
2878 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2879 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2880 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2881 				  sizeof(key->tp.src)) ||
2882 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2883 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2884 				  sizeof(key->tp.dst))))
2885 		goto nla_put_failure;
2886 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2887 		 key->basic.ip_proto == IPPROTO_ICMP &&
2888 		 (fl_dump_key_val(skb, &key->icmp.type,
2889 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2890 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2891 				  sizeof(key->icmp.type)) ||
2892 		  fl_dump_key_val(skb, &key->icmp.code,
2893 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2894 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2895 				  sizeof(key->icmp.code))))
2896 		goto nla_put_failure;
2897 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2898 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2899 		 (fl_dump_key_val(skb, &key->icmp.type,
2900 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2901 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2902 				  sizeof(key->icmp.type)) ||
2903 		  fl_dump_key_val(skb, &key->icmp.code,
2904 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2905 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2906 				  sizeof(key->icmp.code))))
2907 		goto nla_put_failure;
2908 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2909 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2910 		 (fl_dump_key_val(skb, &key->arp.sip,
2911 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2912 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2913 				  sizeof(key->arp.sip)) ||
2914 		  fl_dump_key_val(skb, &key->arp.tip,
2915 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2916 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2917 				  sizeof(key->arp.tip)) ||
2918 		  fl_dump_key_val(skb, &key->arp.op,
2919 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2920 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2921 				  sizeof(key->arp.op)) ||
2922 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2923 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2924 				  sizeof(key->arp.sha)) ||
2925 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2926 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2927 				  sizeof(key->arp.tha))))
2928 		goto nla_put_failure;
2929 
2930 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2931 	     key->basic.ip_proto == IPPROTO_UDP ||
2932 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2933 	     fl_dump_key_port_range(skb, key, mask))
2934 		goto nla_put_failure;
2935 
2936 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2937 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2938 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2939 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2940 			    sizeof(key->enc_ipv4.src)) ||
2941 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2942 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2943 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2944 			     sizeof(key->enc_ipv4.dst))))
2945 		goto nla_put_failure;
2946 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2947 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2948 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2949 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2950 			    sizeof(key->enc_ipv6.src)) ||
2951 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2952 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2953 				 &mask->enc_ipv6.dst,
2954 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2955 			    sizeof(key->enc_ipv6.dst))))
2956 		goto nla_put_failure;
2957 
2958 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2959 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2960 			    sizeof(key->enc_key_id)) ||
2961 	    fl_dump_key_val(skb, &key->enc_tp.src,
2962 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2963 			    &mask->enc_tp.src,
2964 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2965 			    sizeof(key->enc_tp.src)) ||
2966 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2967 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2968 			    &mask->enc_tp.dst,
2969 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2970 			    sizeof(key->enc_tp.dst)) ||
2971 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2972 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2973 		goto nla_put_failure;
2974 
2975 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2976 		goto nla_put_failure;
2977 
2978 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2979 		goto nla_put_failure;
2980 
2981 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
2982 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
2983 			     sizeof(key->hash.hash)))
2984 		goto nla_put_failure;
2985 
2986 	return 0;
2987 
2988 nla_put_failure:
2989 	return -EMSGSIZE;
2990 }
2991 
2992 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2993 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2994 {
2995 	struct cls_fl_filter *f = fh;
2996 	struct nlattr *nest;
2997 	struct fl_flow_key *key, *mask;
2998 	bool skip_hw;
2999 
3000 	if (!f)
3001 		return skb->len;
3002 
3003 	t->tcm_handle = f->handle;
3004 
3005 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3006 	if (!nest)
3007 		goto nla_put_failure;
3008 
3009 	spin_lock(&tp->lock);
3010 
3011 	if (f->res.classid &&
3012 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3013 		goto nla_put_failure_locked;
3014 
3015 	key = &f->key;
3016 	mask = &f->mask->key;
3017 	skip_hw = tc_skip_hw(f->flags);
3018 
3019 	if (fl_dump_key(skb, net, key, mask))
3020 		goto nla_put_failure_locked;
3021 
3022 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3023 		goto nla_put_failure_locked;
3024 
3025 	spin_unlock(&tp->lock);
3026 
3027 	if (!skip_hw)
3028 		fl_hw_update_stats(tp, f, rtnl_held);
3029 
3030 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3031 		goto nla_put_failure;
3032 
3033 	if (tcf_exts_dump(skb, &f->exts))
3034 		goto nla_put_failure;
3035 
3036 	nla_nest_end(skb, nest);
3037 
3038 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3039 		goto nla_put_failure;
3040 
3041 	return skb->len;
3042 
3043 nla_put_failure_locked:
3044 	spin_unlock(&tp->lock);
3045 nla_put_failure:
3046 	nla_nest_cancel(skb, nest);
3047 	return -1;
3048 }
3049 
3050 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3051 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3052 {
3053 	struct cls_fl_filter *f = fh;
3054 	struct nlattr *nest;
3055 	bool skip_hw;
3056 
3057 	if (!f)
3058 		return skb->len;
3059 
3060 	t->tcm_handle = f->handle;
3061 
3062 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3063 	if (!nest)
3064 		goto nla_put_failure;
3065 
3066 	spin_lock(&tp->lock);
3067 
3068 	skip_hw = tc_skip_hw(f->flags);
3069 
3070 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3071 		goto nla_put_failure_locked;
3072 
3073 	spin_unlock(&tp->lock);
3074 
3075 	if (!skip_hw)
3076 		fl_hw_update_stats(tp, f, rtnl_held);
3077 
3078 	if (tcf_exts_terse_dump(skb, &f->exts))
3079 		goto nla_put_failure;
3080 
3081 	nla_nest_end(skb, nest);
3082 
3083 	return skb->len;
3084 
3085 nla_put_failure_locked:
3086 	spin_unlock(&tp->lock);
3087 nla_put_failure:
3088 	nla_nest_cancel(skb, nest);
3089 	return -1;
3090 }
3091 
3092 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3093 {
3094 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3095 	struct fl_flow_key *key, *mask;
3096 	struct nlattr *nest;
3097 
3098 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3099 	if (!nest)
3100 		goto nla_put_failure;
3101 
3102 	key = &tmplt->dummy_key;
3103 	mask = &tmplt->mask;
3104 
3105 	if (fl_dump_key(skb, net, key, mask))
3106 		goto nla_put_failure;
3107 
3108 	nla_nest_end(skb, nest);
3109 
3110 	return skb->len;
3111 
3112 nla_put_failure:
3113 	nla_nest_cancel(skb, nest);
3114 	return -EMSGSIZE;
3115 }
3116 
3117 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3118 			  unsigned long base)
3119 {
3120 	struct cls_fl_filter *f = fh;
3121 
3122 	if (f && f->res.classid == classid) {
3123 		if (cl)
3124 			__tcf_bind_filter(q, &f->res, base);
3125 		else
3126 			__tcf_unbind_filter(q, &f->res);
3127 	}
3128 }
3129 
3130 static bool fl_delete_empty(struct tcf_proto *tp)
3131 {
3132 	struct cls_fl_head *head = fl_head_dereference(tp);
3133 
3134 	spin_lock(&tp->lock);
3135 	tp->deleting = idr_is_empty(&head->handle_idr);
3136 	spin_unlock(&tp->lock);
3137 
3138 	return tp->deleting;
3139 }
3140 
3141 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3142 	.kind		= "flower",
3143 	.classify	= fl_classify,
3144 	.init		= fl_init,
3145 	.destroy	= fl_destroy,
3146 	.get		= fl_get,
3147 	.put		= fl_put,
3148 	.change		= fl_change,
3149 	.delete		= fl_delete,
3150 	.delete_empty	= fl_delete_empty,
3151 	.walk		= fl_walk,
3152 	.reoffload	= fl_reoffload,
3153 	.hw_add		= fl_hw_add,
3154 	.hw_del		= fl_hw_del,
3155 	.dump		= fl_dump,
3156 	.terse_dump	= fl_terse_dump,
3157 	.bind_class	= fl_bind_class,
3158 	.tmplt_create	= fl_tmplt_create,
3159 	.tmplt_destroy	= fl_tmplt_destroy,
3160 	.tmplt_dump	= fl_tmplt_dump,
3161 	.owner		= THIS_MODULE,
3162 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3163 };
3164 
3165 static int __init cls_fl_init(void)
3166 {
3167 	return register_tcf_proto_ops(&cls_fl_ops);
3168 }
3169 
3170 static void __exit cls_fl_exit(void)
3171 {
3172 	unregister_tcf_proto_ops(&cls_fl_ops);
3173 }
3174 
3175 module_init(cls_fl_init);
3176 module_exit(cls_fl_exit);
3177 
3178 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3179 MODULE_DESCRIPTION("Flower classifier");
3180 MODULE_LICENSE("GPL v2");
3181