xref: /openbmc/linux/net/sched/cls_flower.c (revision 31e67366)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27 
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30 
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 
33 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
34 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
35 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
36 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
37 
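/* Flattened set of all flow_dissector keys that the flower classifier can
 * match on. Both a filter's key and its mask use this layout, and the
 * structure is long-aligned so that masking and comparison can be done one
 * long at a time (see fl_set_masked_key() below).
 */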
38 struct fl_flow_key {
39 	struct flow_dissector_key_meta meta;
40 	struct flow_dissector_key_control control;
41 	struct flow_dissector_key_control enc_control;
42 	struct flow_dissector_key_basic basic;
43 	struct flow_dissector_key_eth_addrs eth;
44 	struct flow_dissector_key_vlan vlan;
45 	struct flow_dissector_key_vlan cvlan;
46 	union {
47 		struct flow_dissector_key_ipv4_addrs ipv4;
48 		struct flow_dissector_key_ipv6_addrs ipv6;
49 	};
50 	struct flow_dissector_key_ports tp;
51 	struct flow_dissector_key_icmp icmp;
52 	struct flow_dissector_key_arp arp;
53 	struct flow_dissector_key_keyid enc_key_id;
54 	union {
55 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
56 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
57 	};
58 	struct flow_dissector_key_ports enc_tp;
59 	struct flow_dissector_key_mpls mpls;
60 	struct flow_dissector_key_tcp tcp;
61 	struct flow_dissector_key_ip ip;
62 	struct flow_dissector_key_ip enc_ip;
63 	struct flow_dissector_key_enc_opts enc_opts;
64 	union {
65 		struct flow_dissector_key_ports tp;
66 		struct {
67 			struct flow_dissector_key_ports tp_min;
68 			struct flow_dissector_key_ports tp_max;
69 		};
70 	} tp_range;
71 	struct flow_dissector_key_ct ct;
72 	struct flow_dissector_key_hash hash;
73 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
74 
75 struct fl_flow_mask_range {
76 	unsigned short int start;
77 	unsigned short int end;
78 };
79 
80 struct fl_flow_mask {
81 	struct fl_flow_key key;
82 	struct fl_flow_mask_range range;
83 	u32 flags;
84 	struct rhash_head ht_node;
85 	struct rhashtable ht;
86 	struct rhashtable_params filter_ht_params;
87 	struct flow_dissector dissector;
88 	struct list_head filters;
89 	struct rcu_work rwork;
90 	struct list_head list;
91 	refcount_t refcnt;
92 };
93 
94 struct fl_flow_tmplt {
95 	struct fl_flow_key dummy_key;
96 	struct fl_flow_key mask;
97 	struct flow_dissector dissector;
98 	struct tcf_chain *chain;
99 };
100 
101 struct cls_fl_head {
102 	struct rhashtable ht;
103 	spinlock_t masks_lock; /* Protect masks list */
104 	struct list_head masks;
105 	struct list_head hw_filters;
106 	struct rcu_work rwork;
107 	struct idr handle_idr;
108 };
109 
110 struct cls_fl_filter {
111 	struct fl_flow_mask *mask;
112 	struct rhash_head ht_node;
113 	struct fl_flow_key mkey;
114 	struct tcf_exts exts;
115 	struct tcf_result res;
116 	struct fl_flow_key key;
117 	struct list_head list;
118 	struct list_head hw_list;
119 	u32 handle;
120 	u32 flags;
121 	u32 in_hw_count;
122 	struct rcu_work rwork;
123 	struct net_device *hw_dev;
124 	/* The flower classifier is unlocked, which means that its reference
125 	 * counter can be changed concurrently without any kind of external
126 	 * synchronization. Use an atomic reference counter to be concurrency-safe.
127 	 */
128 	refcount_t refcnt;
129 	bool deleted;
130 };
131 
132 static const struct rhashtable_params mask_ht_params = {
133 	.key_offset = offsetof(struct fl_flow_mask, key),
134 	.key_len = sizeof(struct fl_flow_key),
135 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
136 	.automatic_shrinking = true,
137 };
138 
139 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
140 {
141 	return mask->range.end - mask->range.start;
142 }
143 
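/* Find the span of bytes that the mask actually uses and round it out to
 * long boundaries. Lookups then hash and compare only this range of the
 * masked key. For example, a mask whose only non-zero bytes sit at offsets
 * 37-38 yields start = 32 and end = 40 with 8-byte longs (illustrative
 * offsets).
 */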
144 static void fl_mask_update_range(struct fl_flow_mask *mask)
145 {
146 	const u8 *bytes = (const u8 *) &mask->key;
147 	size_t size = sizeof(mask->key);
148 	size_t i, first = 0, last;
149 
150 	for (i = 0; i < size; i++) {
151 		if (bytes[i]) {
152 			first = i;
153 			break;
154 		}
155 	}
156 	last = first;
157 	for (i = size - 1; i != first; i--) {
158 		if (bytes[i]) {
159 			last = i;
160 			break;
161 		}
162 	}
163 	mask->range.start = rounddown(first, sizeof(long));
164 	mask->range.end = roundup(last + 1, sizeof(long));
165 }
166 
167 static void *fl_key_get_start(struct fl_flow_key *key,
168 			      const struct fl_flow_mask *mask)
169 {
170 	return (u8 *) key + mask->range.start;
171 }
172 
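/* Compute mkey = key & mask over the mask's active range, one long at a
 * time. This relies on struct fl_flow_key being long-aligned.
 */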
173 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
174 			      struct fl_flow_mask *mask)
175 {
176 	const long *lkey = fl_key_get_start(key, mask);
177 	const long *lmask = fl_key_get_start(&mask->key, mask);
178 	long *lmkey = fl_key_get_start(mkey, mask);
179 	int i;
180 
181 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
182 		*lmkey++ = *lkey++ & *lmask++;
183 }
184 
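/* A mask fits a chain template if it does not enable any bit that the
 * template leaves unset, i.e. ~template & mask must be zero over the
 * mask's active range. A NULL template fits every mask.
 */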
185 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
186 			       struct fl_flow_mask *mask)
187 {
188 	const long *lmask = fl_key_get_start(&mask->key, mask);
189 	const long *ltmplt;
190 	int i;
191 
192 	if (!tmplt)
193 		return true;
194 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
195 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
196 		if (~*ltmplt++ & *lmask++)
197 			return false;
198 	}
199 	return true;
200 }
201 
202 static void fl_clear_masked_range(struct fl_flow_key *key,
203 				  struct fl_flow_mask *mask)
204 {
205 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
206 }
207 
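/* For filters using a destination port range: if both ends of the range
 * are masked, require the packet's port to fall within [min, max]
 * (compared in host byte order) and copy the filter's masked min/max into
 * mkey so that the following hash lookup can match; the skb itself carries
 * no min/max values. Filters that do not mask the range pass trivially.
 */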
208 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
209 				  struct fl_flow_key *key,
210 				  struct fl_flow_key *mkey)
211 {
212 	__be16 min_mask, max_mask, min_val, max_val;
213 
214 	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
215 	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
216 	min_val = htons(filter->key.tp_range.tp_min.dst);
217 	max_val = htons(filter->key.tp_range.tp_max.dst);
218 
219 	if (min_mask && max_mask) {
220 		if (htons(key->tp_range.tp.dst) < min_val ||
221 		    htons(key->tp_range.tp.dst) > max_val)
222 			return false;
223 
224 		/* skb does not have min and max values */
225 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
226 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
227 	}
228 	return true;
229 }
230 
231 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
232 				  struct fl_flow_key *key,
233 				  struct fl_flow_key *mkey)
234 {
235 	__be16 min_mask, max_mask, min_val, max_val;
236 
237 	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
238 	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
239 	min_val = htons(filter->key.tp_range.tp_min.src);
240 	max_val = htons(filter->key.tp_range.tp_max.src);
241 
242 	if (min_mask && max_mask) {
243 		if (htons(key->tp_range.tp.src) < min_val ||
244 		    htons(key->tp_range.tp.src) > max_val)
245 			return false;
246 
247 		/* skb does not have min and max values */
248 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
249 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
250 	}
251 	return true;
252 }
253 
254 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
255 					 struct fl_flow_key *mkey)
256 {
257 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
258 				      mask->filter_ht_params);
259 }
260 
261 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
262 					     struct fl_flow_key *mkey,
263 					     struct fl_flow_key *key)
264 {
265 	struct cls_fl_filter *filter, *f;
266 
267 	list_for_each_entry_rcu(filter, &mask->filters, list) {
268 		if (!fl_range_port_dst_cmp(filter, key, mkey))
269 			continue;
270 
271 		if (!fl_range_port_src_cmp(filter, key, mkey))
272 			continue;
273 
274 		f = __fl_lookup(mask, mkey);
275 		if (f)
276 			return f;
277 	}
278 	return NULL;
279 }
280 
281 static noinline_for_stack
282 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
283 {
284 	struct fl_flow_key mkey;
285 
286 	fl_set_masked_key(&mkey, key, mask);
287 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
288 		return fl_lookup_range(mask, &mkey, key);
289 
290 	return __fl_lookup(mask, &mkey);
291 }
292 
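/* Map conntrack info states to the TCA_FLOWER_KEY_CT_FLAGS_* bits that
 * skb_flow_dissect_ct() reports for a packet.
 */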
293 static u16 fl_ct_info_to_flower_map[] = {
294 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
296 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
297 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
298 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
299 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
300 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
301 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
302 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
303 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
304 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
305 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
306 };
307 
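/* Software classification path: for each mask in use, dissect only the
 * fields that mask needs, apply the mask and look the result up in that
 * mask's hash table. A matching filter executes its actions unless it is
 * marked skip_sw.
 */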
308 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
309 		       struct tcf_result *res)
310 {
311 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
312 	bool post_ct = qdisc_skb_cb(skb)->post_ct;
313 	struct fl_flow_key skb_key;
314 	struct fl_flow_mask *mask;
315 	struct cls_fl_filter *f;
316 
317 	list_for_each_entry_rcu(mask, &head->masks, list) {
318 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
319 		fl_clear_masked_range(&skb_key, mask);
320 
321 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
322 		/* skb_flow_dissect() does not set n_proto in case of an
323 		 * unknown protocol, so do it here instead.
324 		 */
325 		skb_key.basic.n_proto = skb_protocol(skb, false);
326 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
327 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
328 				    fl_ct_info_to_flower_map,
329 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
330 				    post_ct);
331 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
332 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
333 
334 		f = fl_mask_lookup(mask, &skb_key);
335 		if (f && !tc_skip_sw(f->flags)) {
336 			*res = f->res;
337 			return tcf_exts_exec(skb, &f->exts, res);
338 		}
339 	}
340 	return -1;
341 }
342 
343 static int fl_init(struct tcf_proto *tp)
344 {
345 	struct cls_fl_head *head;
346 
347 	head = kzalloc(sizeof(*head), GFP_KERNEL);
348 	if (!head)
349 		return -ENOBUFS;
350 
351 	spin_lock_init(&head->masks_lock);
352 	INIT_LIST_HEAD_RCU(&head->masks);
353 	INIT_LIST_HEAD(&head->hw_filters);
354 	rcu_assign_pointer(tp->root, head);
355 	idr_init(&head->handle_idr);
356 
357 	return rhashtable_init(&head->ht, &mask_ht_params);
358 }
359 
360 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
361 {
362 	/* temporary masks don't have their filters list and ht initialized */
363 	if (mask_init_done) {
364 		WARN_ON(!list_empty(&mask->filters));
365 		rhashtable_destroy(&mask->ht);
366 	}
367 	kfree(mask);
368 }
369 
370 static void fl_mask_free_work(struct work_struct *work)
371 {
372 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
373 						 struct fl_flow_mask, rwork);
374 
375 	fl_mask_free(mask, true);
376 }
377 
378 static void fl_uninit_mask_free_work(struct work_struct *work)
379 {
380 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
381 						 struct fl_flow_mask, rwork);
382 
383 	fl_mask_free(mask, false);
384 }
385 
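/* Drop a reference on a mask. When the last filter using it goes away,
 * unlink the mask from the masks hash table and list and free it from a
 * workqueue after an RCU grace period. Returns true if the mask was
 * released.
 */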
386 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
387 {
388 	if (!refcount_dec_and_test(&mask->refcnt))
389 		return false;
390 
391 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
392 
393 	spin_lock(&head->masks_lock);
394 	list_del_rcu(&mask->list);
395 	spin_unlock(&head->masks_lock);
396 
397 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
398 
399 	return true;
400 }
401 
402 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
403 {
404 	/* The flower classifier only changes the root pointer during init and
405 	 * destroy. Users must obtain a reference to the tcf_proto instance
406 	 * before calling its API, so the tp->root pointer is protected from a
407 	 * concurrent call to fl_destroy() by reference counting.
408 	 */
409 	return rcu_dereference_raw(tp->root);
410 }
411 
412 static void __fl_destroy_filter(struct cls_fl_filter *f)
413 {
414 	tcf_exts_destroy(&f->exts);
415 	tcf_exts_put_net(&f->exts);
416 	kfree(f);
417 }
418 
419 static void fl_destroy_filter_work(struct work_struct *work)
420 {
421 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
422 					struct cls_fl_filter, rwork);
423 
424 	__fl_destroy_filter(f);
425 }
426 
427 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
428 				 bool rtnl_held, struct netlink_ext_ack *extack)
429 {
430 	struct tcf_block *block = tp->chain->block;
431 	struct flow_cls_offload cls_flower = {};
432 
433 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
434 	cls_flower.command = FLOW_CLS_DESTROY;
435 	cls_flower.cookie = (unsigned long) f;
436 
437 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
438 			    &f->flags, &f->in_hw_count, rtnl_held);
439 
440 }
441 
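/* Offload a filter to hardware through the block's FLOW_CLS_REPLACE
 * callbacks. Failing to build the flow actions is harmless for a filter
 * that also runs in software, but a skip_sw filter must end up resident in
 * hardware (TCA_CLS_FLAGS_IN_HW) or the replace fails.
 */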
442 static int fl_hw_replace_filter(struct tcf_proto *tp,
443 				struct cls_fl_filter *f, bool rtnl_held,
444 				struct netlink_ext_ack *extack)
445 {
446 	struct tcf_block *block = tp->chain->block;
447 	struct flow_cls_offload cls_flower = {};
448 	bool skip_sw = tc_skip_sw(f->flags);
449 	int err = 0;
450 
451 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
452 	if (!cls_flower.rule)
453 		return -ENOMEM;
454 
455 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
456 	cls_flower.command = FLOW_CLS_REPLACE;
457 	cls_flower.cookie = (unsigned long) f;
458 	cls_flower.rule->match.dissector = &f->mask->dissector;
459 	cls_flower.rule->match.mask = &f->mask->key;
460 	cls_flower.rule->match.key = &f->mkey;
461 	cls_flower.classid = f->res.classid;
462 
463 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
464 	if (err) {
465 		kfree(cls_flower.rule);
466 		if (skip_sw) {
467 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
468 			return err;
469 		}
470 		return 0;
471 	}
472 
473 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
474 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
475 	tc_cleanup_flow_action(&cls_flower.rule->action);
476 	kfree(cls_flower.rule);
477 
478 	if (err) {
479 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
480 		return err;
481 	}
482 
483 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
484 		return -EINVAL;
485 
486 	return 0;
487 }
488 
489 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
490 			       bool rtnl_held)
491 {
492 	struct tcf_block *block = tp->chain->block;
493 	struct flow_cls_offload cls_flower = {};
494 
495 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
496 	cls_flower.command = FLOW_CLS_STATS;
497 	cls_flower.cookie = (unsigned long) f;
498 	cls_flower.classid = f->res.classid;
499 
500 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
501 			 rtnl_held);
502 
503 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
504 			      cls_flower.stats.pkts,
505 			      cls_flower.stats.drops,
506 			      cls_flower.stats.lastused,
507 			      cls_flower.stats.used_hw_stats,
508 			      cls_flower.stats.used_hw_stats_valid);
509 }
510 
511 static void __fl_put(struct cls_fl_filter *f)
512 {
513 	if (!refcount_dec_and_test(&f->refcnt))
514 		return;
515 
516 	if (tcf_exts_get_net(&f->exts))
517 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
518 	else
519 		__fl_destroy_filter(f);
520 }
521 
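/* Look up a filter by handle under RCU and take a reference. Returns NULL
 * if the handle is unknown or the filter is concurrently being destroyed
 * (its reference count already dropped to zero).
 */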
522 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
523 {
524 	struct cls_fl_filter *f;
525 
526 	rcu_read_lock();
527 	f = idr_find(&head->handle_idr, handle);
528 	if (f && !refcount_inc_not_zero(&f->refcnt))
529 		f = NULL;
530 	rcu_read_unlock();
531 
532 	return f;
533 }
534 
535 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
536 		       bool *last, bool rtnl_held,
537 		       struct netlink_ext_ack *extack)
538 {
539 	struct cls_fl_head *head = fl_head_dereference(tp);
540 
541 	*last = false;
542 
543 	spin_lock(&tp->lock);
544 	if (f->deleted) {
545 		spin_unlock(&tp->lock);
546 		return -ENOENT;
547 	}
548 
549 	f->deleted = true;
550 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
551 			       f->mask->filter_ht_params);
552 	idr_remove(&head->handle_idr, f->handle);
553 	list_del_rcu(&f->list);
554 	spin_unlock(&tp->lock);
555 
556 	*last = fl_mask_put(head, f->mask);
557 	if (!tc_skip_hw(f->flags))
558 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
559 	tcf_unbind_filter(tp, &f->res);
560 	__fl_put(f);
561 
562 	return 0;
563 }
564 
565 static void fl_destroy_sleepable(struct work_struct *work)
566 {
567 	struct cls_fl_head *head = container_of(to_rcu_work(work),
568 						struct cls_fl_head,
569 						rwork);
570 
571 	rhashtable_destroy(&head->ht);
572 	kfree(head);
573 	module_put(THIS_MODULE);
574 }
575 
576 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
577 		       struct netlink_ext_ack *extack)
578 {
579 	struct cls_fl_head *head = fl_head_dereference(tp);
580 	struct fl_flow_mask *mask, *next_mask;
581 	struct cls_fl_filter *f, *next;
582 	bool last;
583 
584 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
585 		list_for_each_entry_safe(f, next, &mask->filters, list) {
586 			__fl_delete(tp, f, &last, rtnl_held, extack);
587 			if (last)
588 				break;
589 		}
590 	}
591 	idr_destroy(&head->handle_idr);
592 
593 	__module_get(THIS_MODULE);
594 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
595 }
596 
597 static void fl_put(struct tcf_proto *tp, void *arg)
598 {
599 	struct cls_fl_filter *f = arg;
600 
601 	__fl_put(f);
602 }
603 
604 static void *fl_get(struct tcf_proto *tp, u32 handle)
605 {
606 	struct cls_fl_head *head = fl_head_dereference(tp);
607 
608 	return __fl_get(head, handle);
609 }
610 
611 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
612 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
613 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
614 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
615 					    .len = IFNAMSIZ },
616 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
617 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
618 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
619 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
620 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
621 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
622 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
623 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
624 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
625 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
627 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
628 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
629 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
631 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
632 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
636 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
638 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
639 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
640 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
641 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
642 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
643 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
644 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
645 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
646 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
647 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
648 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
659 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
660 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
661 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
662 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
669 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
670 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
673 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
674 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
675 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
676 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
677 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
678 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
679 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
681 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
682 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
683 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
684 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
685 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
686 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
687 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
688 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
690 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
692 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
694 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
696 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
697 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
698 	[TCA_FLOWER_KEY_CT_STATE]	=
699 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
700 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
701 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
702 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
703 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
704 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
705 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
706 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
707 					    .len = 128 / BITS_PER_BYTE },
708 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
709 					    .len = 128 / BITS_PER_BYTE },
710 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
711 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
712 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
713 
714 };
715 
716 static const struct nla_policy
717 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
718 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
719 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
720 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
721 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
722 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
723 };
724 
725 static const struct nla_policy
726 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
727 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
728 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
729 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
730 						       .len = 128 },
731 };
732 
733 static const struct nla_policy
734 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
735 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
736 };
737 
738 static const struct nla_policy
739 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
740 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
741 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
742 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
743 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
744 };
745 
746 static const struct nla_policy
747 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
748 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
749 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
750 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
751 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
752 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
753 };
754 
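/* If the value attribute is present, copy it into the key and copy either
 * the corresponding mask attribute or an all-ones mask (exact match) into
 * the mask.
 */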
755 static void fl_set_key_val(struct nlattr **tb,
756 			   void *val, int val_type,
757 			   void *mask, int mask_type, int len)
758 {
759 	if (!tb[val_type])
760 		return;
761 	nla_memcpy(val, tb[val_type], len);
762 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
763 		memset(mask, 0xff, len);
764 	else
765 		nla_memcpy(mask, tb[mask_type], len);
766 }
767 
768 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
769 				 struct fl_flow_key *mask,
770 				 struct netlink_ext_ack *extack)
771 {
772 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
773 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
774 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
775 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
776 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
777 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
778 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
779 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
780 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
781 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
782 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
783 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
784 
785 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
786 	    htons(key->tp_range.tp_max.dst) <=
787 	    htons(key->tp_range.tp_min.dst)) {
788 		NL_SET_ERR_MSG_ATTR(extack,
789 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
790 				    "Invalid destination port range (min must be strictly smaller than max)");
791 		return -EINVAL;
792 	}
793 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
794 	    htons(key->tp_range.tp_max.src) <=
795 	    htons(key->tp_range.tp_min.src)) {
796 		NL_SET_ERR_MSG_ATTR(extack,
797 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
798 				    "Invalid source port range (min must be strictly smaller than max)");
799 		return -EINVAL;
800 	}
801 
802 	return 0;
803 }
804 
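/* Parse one MPLS label stack entry (LSE) option. The mandatory depth
 * attribute is 1-based and selects which LSE of the dissector key the
 * TTL/BOS/TC/label values apply to.
 */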
805 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
806 			       struct flow_dissector_key_mpls *key_val,
807 			       struct flow_dissector_key_mpls *key_mask,
808 			       struct netlink_ext_ack *extack)
809 {
810 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
811 	struct flow_dissector_mpls_lse *lse_mask;
812 	struct flow_dissector_mpls_lse *lse_val;
813 	u8 lse_index;
814 	u8 depth;
815 	int err;
816 
817 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
818 			       mpls_stack_entry_policy, extack);
819 	if (err < 0)
820 		return err;
821 
822 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
823 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
824 		return -EINVAL;
825 	}
826 
827 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
828 
829 	/* LSE depth starts at 1, for consistency with terminology used by
830 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
831 	 */
832 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
833 		NL_SET_ERR_MSG_ATTR(extack,
834 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
835 				    "Invalid MPLS depth");
836 		return -EINVAL;
837 	}
838 	lse_index = depth - 1;
839 
840 	dissector_set_mpls_lse(key_val, lse_index);
841 	dissector_set_mpls_lse(key_mask, lse_index);
842 
843 	lse_val = &key_val->ls[lse_index];
844 	lse_mask = &key_mask->ls[lse_index];
845 
846 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
847 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
848 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
849 	}
850 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
851 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
852 
853 		if (bos & ~MPLS_BOS_MASK) {
854 			NL_SET_ERR_MSG_ATTR(extack,
855 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
856 					    "Bottom Of Stack (BOS) must be 0 or 1");
857 			return -EINVAL;
858 		}
859 		lse_val->mpls_bos = bos;
860 		lse_mask->mpls_bos = MPLS_BOS_MASK;
861 	}
862 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
863 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
864 
865 		if (tc & ~MPLS_TC_MASK) {
866 			NL_SET_ERR_MSG_ATTR(extack,
867 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
868 					    "Traffic Class (TC) must be between 0 and 7");
869 			return -EINVAL;
870 		}
871 		lse_val->mpls_tc = tc;
872 		lse_mask->mpls_tc = MPLS_TC_MASK;
873 	}
874 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
875 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
876 
877 		if (label & ~MPLS_LABEL_MASK) {
878 			NL_SET_ERR_MSG_ATTR(extack,
879 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
880 					    "Label must be between 0 and 1048575");
881 			return -EINVAL;
882 		}
883 		lse_val->mpls_label = label;
884 		lse_mask->mpls_label = MPLS_LABEL_MASK;
885 	}
886 
887 	return 0;
888 }
889 
890 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
891 				struct flow_dissector_key_mpls *key_val,
892 				struct flow_dissector_key_mpls *key_mask,
893 				struct netlink_ext_ack *extack)
894 {
895 	struct nlattr *nla_lse;
896 	int rem;
897 	int err;
898 
899 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
900 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
901 				    "NLA_F_NESTED is missing");
902 		return -EINVAL;
903 	}
904 
905 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
906 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
907 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
908 					    "Invalid MPLS option type");
909 			return -EINVAL;
910 		}
911 
912 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
913 		if (err < 0)
914 			return err;
915 	}
916 	if (rem) {
917 		NL_SET_ERR_MSG(extack,
918 			       "Bytes leftover after parsing MPLS options");
919 		return -EINVAL;
920 	}
921 
922 	return 0;
923 }
924 
925 static int fl_set_key_mpls(struct nlattr **tb,
926 			   struct flow_dissector_key_mpls *key_val,
927 			   struct flow_dissector_key_mpls *key_mask,
928 			   struct netlink_ext_ack *extack)
929 {
930 	struct flow_dissector_mpls_lse *lse_mask;
931 	struct flow_dissector_mpls_lse *lse_val;
932 
933 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
934 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
935 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
936 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
937 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
938 			NL_SET_ERR_MSG_ATTR(extack,
939 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
940 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
941 			return -EBADMSG;
942 		}
943 
944 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
945 					    key_val, key_mask, extack);
946 	}
947 
948 	lse_val = &key_val->ls[0];
949 	lse_mask = &key_mask->ls[0];
950 
951 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
952 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
953 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
954 		dissector_set_mpls_lse(key_val, 0);
955 		dissector_set_mpls_lse(key_mask, 0);
956 	}
957 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
958 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
959 
960 		if (bos & ~MPLS_BOS_MASK) {
961 			NL_SET_ERR_MSG_ATTR(extack,
962 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
963 					    "Bottom Of Stack (BOS) must be 0 or 1");
964 			return -EINVAL;
965 		}
966 		lse_val->mpls_bos = bos;
967 		lse_mask->mpls_bos = MPLS_BOS_MASK;
968 		dissector_set_mpls_lse(key_val, 0);
969 		dissector_set_mpls_lse(key_mask, 0);
970 	}
971 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
972 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
973 
974 		if (tc & ~MPLS_TC_MASK) {
975 			NL_SET_ERR_MSG_ATTR(extack,
976 					    tb[TCA_FLOWER_KEY_MPLS_TC],
977 					    "Traffic Class (TC) must be between 0 and 7");
978 			return -EINVAL;
979 		}
980 		lse_val->mpls_tc = tc;
981 		lse_mask->mpls_tc = MPLS_TC_MASK;
982 		dissector_set_mpls_lse(key_val, 0);
983 		dissector_set_mpls_lse(key_mask, 0);
984 	}
985 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
986 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
987 
988 		if (label & ~MPLS_LABEL_MASK) {
989 			NL_SET_ERR_MSG_ATTR(extack,
990 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
991 					    "Label must be between 0 and 1048575");
992 			return -EINVAL;
993 		}
994 		lse_val->mpls_label = label;
995 		lse_mask->mpls_label = MPLS_LABEL_MASK;
996 		dissector_set_mpls_lse(key_val, 0);
997 		dissector_set_mpls_lse(key_mask, 0);
998 	}
999 	return 0;
1000 }
1001 
1002 static void fl_set_key_vlan(struct nlattr **tb,
1003 			    __be16 ethertype,
1004 			    int vlan_id_key, int vlan_prio_key,
1005 			    struct flow_dissector_key_vlan *key_val,
1006 			    struct flow_dissector_key_vlan *key_mask)
1007 {
1008 #define VLAN_PRIORITY_MASK	0x7
1009 
1010 	if (tb[vlan_id_key]) {
1011 		key_val->vlan_id =
1012 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1013 		key_mask->vlan_id = VLAN_VID_MASK;
1014 	}
1015 	if (tb[vlan_prio_key]) {
1016 		key_val->vlan_priority =
1017 			nla_get_u8(tb[vlan_prio_key]) &
1018 			VLAN_PRIORITY_MASK;
1019 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1020 	}
1021 	key_val->vlan_tpid = ethertype;
1022 	key_mask->vlan_tpid = cpu_to_be16(~0);
1023 }
1024 
1025 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1026 			    u32 *dissector_key, u32 *dissector_mask,
1027 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1028 {
1029 	if (flower_mask & flower_flag_bit) {
1030 		*dissector_mask |= dissector_flag_bit;
1031 		if (flower_key & flower_flag_bit)
1032 			*dissector_key |= dissector_flag_bit;
1033 	}
1034 }
1035 
1036 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1037 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1038 {
1039 	u32 key, mask;
1040 
1041 	/* mask is mandatory for flags */
1042 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1043 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1044 		return -EINVAL;
1045 	}
1046 
1047 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
1048 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1049 
1050 	*flags_key  = 0;
1051 	*flags_mask = 0;
1052 
1053 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1054 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1055 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1056 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1057 			FLOW_DIS_FIRST_FRAG);
1058 
1059 	return 0;
1060 }
1061 
1062 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1063 			  struct flow_dissector_key_ip *key,
1064 			  struct flow_dissector_key_ip *mask)
1065 {
1066 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1067 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1068 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1069 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1070 
1071 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1072 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1073 }
1074 
1075 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1076 			     int depth, int option_len,
1077 			     struct netlink_ext_ack *extack)
1078 {
1079 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1080 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1081 	struct geneve_opt *opt;
1082 	int err, data_len = 0;
1083 
1084 	if (option_len > sizeof(struct geneve_opt))
1085 		data_len = option_len - sizeof(struct geneve_opt);
1086 
1087 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1088 	memset(opt, 0xff, option_len);
1089 	opt->length = data_len / 4;
1090 	opt->r1 = 0;
1091 	opt->r2 = 0;
1092 	opt->r3 = 0;
1093 
1094 	/* If no mask has been provided we assume an exact match. */
1095 	if (!depth)
1096 		return sizeof(struct geneve_opt) + data_len;
1097 
1098 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1099 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1100 		return -EINVAL;
1101 	}
1102 
1103 	err = nla_parse_nested_deprecated(tb,
1104 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1105 					  nla, geneve_opt_policy, extack);
1106 	if (err < 0)
1107 		return err;
1108 
1109 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1110 	 * fields from the key.
1111 	 */
1112 	if (!option_len &&
1113 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1114 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1115 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1116 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1117 		return -EINVAL;
1118 	}
1119 
1120 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1121 	 * for the mask.
1122 	 */
1123 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1124 		int new_len = key->enc_opts.len;
1125 
1126 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1127 		data_len = nla_len(data);
1128 		if (data_len < 4) {
1129 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1130 			return -ERANGE;
1131 		}
1132 		if (data_len % 4) {
1133 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1134 			return -ERANGE;
1135 		}
1136 
1137 		new_len += sizeof(struct geneve_opt) + data_len;
1138 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1139 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1140 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1141 			return -ERANGE;
1142 		}
1143 		opt->length = data_len / 4;
1144 		memcpy(opt->opt_data, nla_data(data), data_len);
1145 	}
1146 
1147 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1148 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1149 		opt->opt_class = nla_get_be16(class);
1150 	}
1151 
1152 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1153 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1154 		opt->type = nla_get_u8(type);
1155 	}
1156 
1157 	return sizeof(struct geneve_opt) + data_len;
1158 }
1159 
1160 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1161 			    int depth, int option_len,
1162 			    struct netlink_ext_ack *extack)
1163 {
1164 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1165 	struct vxlan_metadata *md;
1166 	int err;
1167 
1168 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1169 	memset(md, 0xff, sizeof(*md));
1170 
1171 	if (!depth)
1172 		return sizeof(*md);
1173 
1174 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1175 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1176 		return -EINVAL;
1177 	}
1178 
1179 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1180 			       vxlan_opt_policy, extack);
1181 	if (err < 0)
1182 		return err;
1183 
1184 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1185 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1186 		return -EINVAL;
1187 	}
1188 
1189 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1190 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1191 		md->gbp &= VXLAN_GBP_MASK;
1192 	}
1193 
1194 	return sizeof(*md);
1195 }
1196 
1197 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1198 			     int depth, int option_len,
1199 			     struct netlink_ext_ack *extack)
1200 {
1201 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1202 	struct erspan_metadata *md;
1203 	int err;
1204 
1205 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1206 	memset(md, 0xff, sizeof(*md));
1207 	md->version = 1;
1208 
1209 	if (!depth)
1210 		return sizeof(*md);
1211 
1212 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1213 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1214 		return -EINVAL;
1215 	}
1216 
1217 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1218 			       erspan_opt_policy, extack);
1219 	if (err < 0)
1220 		return err;
1221 
1222 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1223 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1224 		return -EINVAL;
1225 	}
1226 
1227 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1228 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1229 
1230 	if (md->version == 1) {
1231 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1232 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1233 			return -EINVAL;
1234 		}
1235 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1236 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1237 			memset(&md->u, 0x00, sizeof(md->u));
1238 			md->u.index = nla_get_be32(nla);
1239 		}
1240 	} else if (md->version == 2) {
1241 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1242 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1243 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1244 			return -EINVAL;
1245 		}
1246 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1247 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1248 			md->u.md2.dir = nla_get_u8(nla);
1249 		}
1250 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1251 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1252 			set_hwid(&md->u.md2, nla_get_u8(nla));
1253 		}
1254 	} else {
1255 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1256 		return -EINVAL;
1257 	}
1258 
1259 	return sizeof(*md);
1260 }
1261 
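/* Parse tunnel (encapsulation) options. The key and mask ENC_OPTS nests
 * are walked in lockstep; each option type (geneve, vxlan, erspan) appends
 * its binary form to enc_opts.data of the key and of the mask, and the
 * accumulated key and mask lengths must stay equal.
 */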
1262 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1263 			  struct fl_flow_key *mask,
1264 			  struct netlink_ext_ack *extack)
1265 {
1266 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1267 	int err, option_len, key_depth, msk_depth = 0;
1268 
1269 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1270 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1271 					     enc_opts_policy, extack);
1272 	if (err)
1273 		return err;
1274 
1275 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1276 
1277 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1278 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1279 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1280 						     enc_opts_policy, extack);
1281 		if (err)
1282 			return err;
1283 
1284 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1285 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1286 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1287 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1288 			return -EINVAL;
1289 		}
1290 	}
1291 
1292 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1293 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1294 		switch (nla_type(nla_opt_key)) {
1295 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1296 			if (key->enc_opts.dst_opt_type &&
1297 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1298 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1299 				return -EINVAL;
1300 			}
1301 			option_len = 0;
1302 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1303 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1304 						       key_depth, option_len,
1305 						       extack);
1306 			if (option_len < 0)
1307 				return option_len;
1308 
1309 			key->enc_opts.len += option_len;
1310 			/* At the same time we need to parse through the mask
1311 			 * in order to verify exact and mask attribute lengths.
1312 			 */
1313 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1314 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1315 						       msk_depth, option_len,
1316 						       extack);
1317 			if (option_len < 0)
1318 				return option_len;
1319 
1320 			mask->enc_opts.len += option_len;
1321 			if (key->enc_opts.len != mask->enc_opts.len) {
1322 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1323 				return -EINVAL;
1324 			}
1325 			break;
1326 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1327 			if (key->enc_opts.dst_opt_type) {
1328 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1329 				return -EINVAL;
1330 			}
1331 			option_len = 0;
1332 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1333 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1334 						      key_depth, option_len,
1335 						      extack);
1336 			if (option_len < 0)
1337 				return option_len;
1338 
1339 			key->enc_opts.len += option_len;
1340 			/* At the same time we need to parse through the mask
1341 			 * in order to verify exact and mask attribute lengths.
1342 			 */
1343 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1344 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1345 						      msk_depth, option_len,
1346 						      extack);
1347 			if (option_len < 0)
1348 				return option_len;
1349 
1350 			mask->enc_opts.len += option_len;
1351 			if (key->enc_opts.len != mask->enc_opts.len) {
1352 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1353 				return -EINVAL;
1354 			}
1355 			break;
1356 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1357 			if (key->enc_opts.dst_opt_type) {
1358 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1359 				return -EINVAL;
1360 			}
1361 			option_len = 0;
1362 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1363 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1364 						       key_depth, option_len,
1365 						       extack);
1366 			if (option_len < 0)
1367 				return option_len;
1368 
1369 			key->enc_opts.len += option_len;
1370 			/* At the same time we need to parse through the mask
1371 			 * in order to verify exact and mask attribute lengths.
1372 			 */
1373 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1374 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1375 						       msk_depth, option_len,
1376 						       extack);
1377 			if (option_len < 0)
1378 				return option_len;
1379 
1380 			mask->enc_opts.len += option_len;
1381 			if (key->enc_opts.len != mask->enc_opts.len) {
1382 				NL_SET_ERR_MSG(extack, "Key and mask misaligned");
1383 				return -EINVAL;
1384 			}
1385 			break;
1386 		default:
1387 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1388 			return -EINVAL;
1389 		}
1390 
1391 		if (!msk_depth)
1392 			continue;
1393 
1394 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1395 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1396 			return -EINVAL;
1397 		}
1398 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1399 	}
1400 
1401 	return 0;
1402 }
1403 
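/* Validate the conntrack state mask: any flag other than "tracked"
 * requires "tracked" itself, "new" excludes "established" and "reply", and
 * "invalid" may only be combined with "tracked".
 */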
1404 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1405 				struct netlink_ext_ack *extack)
1406 {
1407 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1408 		NL_SET_ERR_MSG_ATTR(extack, tb,
1409 				    "no trk, so no other flag can be set");
1410 		return -EINVAL;
1411 	}
1412 
1413 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1414 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1415 		NL_SET_ERR_MSG_ATTR(extack, tb,
1416 				    "new and est are mutually exclusive");
1417 		return -EINVAL;
1418 	}
1419 
1420 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1421 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1422 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1423 		NL_SET_ERR_MSG_ATTR(extack, tb,
1424 				    "when inv is set, only trk may be set");
1425 		return -EINVAL;
1426 	}
1427 
1428 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1429 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1430 		NL_SET_ERR_MSG_ATTR(extack, tb,
1431 				    "new and rpl are mutually exclusive");
1432 		return -EINVAL;
1433 	}
1434 
1435 	return 0;
1436 }
1437 
1438 static int fl_set_key_ct(struct nlattr **tb,
1439 			 struct flow_dissector_key_ct *key,
1440 			 struct flow_dissector_key_ct *mask,
1441 			 struct netlink_ext_ack *extack)
1442 {
1443 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1444 		int err;
1445 
1446 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1447 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1448 			return -EOPNOTSUPP;
1449 		}
1450 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1451 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1452 			       sizeof(key->ct_state));
1453 
1454 		err = fl_validate_ct_state(mask->ct_state,
1455 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1456 					   extack);
1457 		if (err)
1458 			return err;
1459 
1460 	}
1461 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1462 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1463 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1464 			return -EOPNOTSUPP;
1465 		}
1466 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1467 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1468 			       sizeof(key->ct_zone));
1469 	}
1470 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1471 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1472 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1473 			return -EOPNOTSUPP;
1474 		}
1475 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1476 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1477 			       sizeof(key->ct_mark));
1478 	}
1479 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1480 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1481 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1482 			return -EOPNOTSUPP;
1483 		}
1484 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1485 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1486 			       sizeof(key->ct_labels));
1487 	}
1488 
1489 	return 0;
1490 }
1491 
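/* Translate the TCA_FLOWER_KEY_* netlink attributes into a flow key and
 * mask. Fields for which no attribute was supplied keep a zero mask and
 * therefore match any value.
 */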
1492 static int fl_set_key(struct net *net, struct nlattr **tb,
1493 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1494 		      struct netlink_ext_ack *extack)
1495 {
1496 	__be16 ethertype;
1497 	int ret = 0;
1498 
1499 	if (tb[TCA_FLOWER_INDEV]) {
1500 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1501 		if (err < 0)
1502 			return err;
1503 		key->meta.ingress_ifindex = err;
1504 		mask->meta.ingress_ifindex = 0xffffffff;
1505 	}
1506 
1507 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1508 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1509 		       sizeof(key->eth.dst));
1510 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1511 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1512 		       sizeof(key->eth.src));
1513 
1514 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1515 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1516 
1517 		if (eth_type_vlan(ethertype)) {
1518 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1519 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1520 					&mask->vlan);
1521 
1522 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1523 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1524 				if (eth_type_vlan(ethertype)) {
1525 					fl_set_key_vlan(tb, ethertype,
1526 							TCA_FLOWER_KEY_CVLAN_ID,
1527 							TCA_FLOWER_KEY_CVLAN_PRIO,
1528 							&key->cvlan, &mask->cvlan);
1529 					fl_set_key_val(tb, &key->basic.n_proto,
1530 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1531 						       &mask->basic.n_proto,
1532 						       TCA_FLOWER_UNSPEC,
1533 						       sizeof(key->basic.n_proto));
1534 				} else {
1535 					key->basic.n_proto = ethertype;
1536 					mask->basic.n_proto = cpu_to_be16(~0);
1537 				}
1538 			}
1539 		} else {
1540 			key->basic.n_proto = ethertype;
1541 			mask->basic.n_proto = cpu_to_be16(~0);
1542 		}
1543 	}
1544 
1545 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1546 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1547 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1548 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1549 			       sizeof(key->basic.ip_proto));
1550 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1551 	}
1552 
1553 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1554 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1555 		mask->control.addr_type = ~0;
1556 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1557 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1558 			       sizeof(key->ipv4.src));
1559 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1560 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1561 			       sizeof(key->ipv4.dst));
1562 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1563 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1564 		mask->control.addr_type = ~0;
1565 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1566 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1567 			       sizeof(key->ipv6.src));
1568 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1569 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1570 			       sizeof(key->ipv6.dst));
1571 	}
1572 
1573 	if (key->basic.ip_proto == IPPROTO_TCP) {
1574 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1575 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1576 			       sizeof(key->tp.src));
1577 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1578 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1579 			       sizeof(key->tp.dst));
1580 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1581 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1582 			       sizeof(key->tcp.flags));
1583 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1584 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1585 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1586 			       sizeof(key->tp.src));
1587 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1588 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1589 			       sizeof(key->tp.dst));
1590 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1591 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1592 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1593 			       sizeof(key->tp.src));
1594 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1595 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1596 			       sizeof(key->tp.dst));
1597 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1598 		   key->basic.ip_proto == IPPROTO_ICMP) {
1599 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1600 			       &mask->icmp.type,
1601 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1602 			       sizeof(key->icmp.type));
1603 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1604 			       &mask->icmp.code,
1605 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1606 			       sizeof(key->icmp.code));
1607 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1608 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1609 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1610 			       &mask->icmp.type,
1611 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1612 			       sizeof(key->icmp.type));
1613 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1614 			       &mask->icmp.code,
1615 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1616 			       sizeof(key->icmp.code));
1617 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1618 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1619 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1620 		if (ret)
1621 			return ret;
1622 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1623 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1624 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1625 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1626 			       sizeof(key->arp.sip));
1627 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1628 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1629 			       sizeof(key->arp.tip));
1630 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1631 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1632 			       sizeof(key->arp.op));
1633 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1634 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1635 			       sizeof(key->arp.sha));
1636 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1637 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1638 			       sizeof(key->arp.tha));
1639 	}
1640 
1641 	if (key->basic.ip_proto == IPPROTO_TCP ||
1642 	    key->basic.ip_proto == IPPROTO_UDP ||
1643 	    key->basic.ip_proto == IPPROTO_SCTP) {
1644 		ret = fl_set_key_port_range(tb, key, mask, extack);
1645 		if (ret)
1646 			return ret;
1647 	}
1648 
1649 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1650 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1651 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1652 		mask->enc_control.addr_type = ~0;
1653 		fl_set_key_val(tb, &key->enc_ipv4.src,
1654 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1655 			       &mask->enc_ipv4.src,
1656 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1657 			       sizeof(key->enc_ipv4.src));
1658 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1659 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1660 			       &mask->enc_ipv4.dst,
1661 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1662 			       sizeof(key->enc_ipv4.dst));
1663 	}
1664 
1665 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1666 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1667 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1668 		mask->enc_control.addr_type = ~0;
1669 		fl_set_key_val(tb, &key->enc_ipv6.src,
1670 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1671 			       &mask->enc_ipv6.src,
1672 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1673 			       sizeof(key->enc_ipv6.src));
1674 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1675 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1676 			       &mask->enc_ipv6.dst,
1677 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1678 			       sizeof(key->enc_ipv6.dst));
1679 	}
1680 
1681 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1682 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1683 		       sizeof(key->enc_key_id.keyid));
1684 
1685 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1686 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1687 		       sizeof(key->enc_tp.src));
1688 
1689 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1690 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1691 		       sizeof(key->enc_tp.dst));
1692 
1693 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1694 
1695 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1696 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1697 		       sizeof(key->hash.hash));
1698 
1699 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1700 		ret = fl_set_enc_opt(tb, key, mask, extack);
1701 		if (ret)
1702 			return ret;
1703 	}
1704 
1705 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1706 	if (ret)
1707 		return ret;
1708 
1709 	if (tb[TCA_FLOWER_KEY_FLAGS])
1710 		ret = fl_set_key_flags(tb, &key->control.flags,
1711 				       &mask->control.flags, extack);
1712 
1713 	return ret;
1714 }
1715 
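/* Copy only the byte range of the key that is actually covered by the
 * source mask (src->range); dst->range is updated to match.
 */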
1716 static void fl_mask_copy(struct fl_flow_mask *dst,
1717 			 struct fl_flow_mask *src)
1718 {
1719 	const void *psrc = fl_key_get_start(&src->key, src);
1720 	void *pdst = fl_key_get_start(&dst->key, src);
1721 
1722 	memcpy(pdst, psrc, fl_mask_range(src));
1723 	dst->range = src->range;
1724 }
1725 
1726 static const struct rhashtable_params fl_ht_params = {
1727 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1728 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1729 	.automatic_shrinking = true,
1730 };
1731 
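/* Each mask gets its own filter hashtable. key_offset and key_len are
 * narrowed to the byte range covered by the mask, so lookups hash and
 * compare only the masked portion of cls_fl_filter->mkey.
 */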
1732 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1733 {
1734 	mask->filter_ht_params = fl_ht_params;
1735 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1736 	mask->filter_ht_params.key_offset += mask->range.start;
1737 
1738 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1739 }
1740 
1741 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1742 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1743 
1744 #define FL_KEY_IS_MASKED(mask, member)						\
1745 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1746 		   0, FL_KEY_MEMBER_SIZE(member))
1747 
1748 #define FL_KEY_SET(keys, cnt, id, member)					\
1749 	do {									\
1750 		keys[cnt].key_id = id;						\
1751 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1752 		cnt++;								\
1753 	} while (0)
1754 
1755 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1756 	do {									\
1757 		if (FL_KEY_IS_MASKED(mask, member))				\
1758 			FL_KEY_SET(keys, cnt, id, member);			\
1759 	} while (0)
1760 
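/* Build the flow dissector for this mask. CONTROL and BASIC are always
 * present; every other key is included only if the corresponding member
 * of the mask is non-zero.
 */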
1761 static void fl_init_dissector(struct flow_dissector *dissector,
1762 			      struct fl_flow_key *mask)
1763 {
1764 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1765 	size_t cnt = 0;
1766 
1767 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1768 			     FLOW_DISSECTOR_KEY_META, meta);
1769 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1770 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1771 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1772 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1773 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1774 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1775 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1776 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1777 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1778 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1779 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1780 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1781 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1782 			     FLOW_DISSECTOR_KEY_IP, ip);
1783 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1784 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1785 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1786 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1787 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1788 			     FLOW_DISSECTOR_KEY_ARP, arp);
1789 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1790 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1791 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1792 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1793 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1794 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1795 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1796 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1797 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1798 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1799 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1800 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1801 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1802 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1803 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1804 			   enc_control);
1805 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1806 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1807 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1808 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1809 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1810 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1811 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1812 			     FLOW_DISSECTOR_KEY_CT, ct);
1813 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1814 			     FLOW_DISSECTOR_KEY_HASH, hash);
1815 
1816 	skb_flow_dissector_init(dissector, keys, cnt);
1817 }
1818 
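/* Promote the caller's temporary mask to a permanent one: copy it, set up
 * its filter hashtable and dissector, replace the temporary node in
 * head->ht and link the result into head->masks.
 */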
1819 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1820 					       struct fl_flow_mask *mask)
1821 {
1822 	struct fl_flow_mask *newmask;
1823 	int err;
1824 
1825 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1826 	if (!newmask)
1827 		return ERR_PTR(-ENOMEM);
1828 
1829 	fl_mask_copy(newmask, mask);
1830 
1831 	if ((newmask->key.tp_range.tp_min.dst &&
1832 	     newmask->key.tp_range.tp_max.dst) ||
1833 	    (newmask->key.tp_range.tp_min.src &&
1834 	     newmask->key.tp_range.tp_max.src))
1835 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1836 
1837 	err = fl_init_mask_hashtable(newmask);
1838 	if (err)
1839 		goto errout_free;
1840 
1841 	fl_init_dissector(&newmask->dissector, &newmask->key);
1842 
1843 	INIT_LIST_HEAD_RCU(&newmask->filters);
1844 
1845 	refcount_set(&newmask->refcnt, 1);
1846 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1847 				      &newmask->ht_node, mask_ht_params);
1848 	if (err)
1849 		goto errout_destroy;
1850 
1851 	spin_lock(&head->masks_lock);
1852 	list_add_tail_rcu(&newmask->list, &head->masks);
1853 	spin_unlock(&head->masks_lock);
1854 
1855 	return newmask;
1856 
1857 errout_destroy:
1858 	rhashtable_destroy(&newmask->ht);
1859 errout_free:
1860 	kfree(newmask);
1861 
1862 	return ERR_PTR(err);
1863 }
1864 
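/* Find an existing mask identical to the caller's temporary one and take
 * a reference on it, or create a permanent mask from it. When replacing a
 * filter (fold != NULL), its mask must stay the same.
 */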
1865 static int fl_check_assign_mask(struct cls_fl_head *head,
1866 				struct cls_fl_filter *fnew,
1867 				struct cls_fl_filter *fold,
1868 				struct fl_flow_mask *mask)
1869 {
1870 	struct fl_flow_mask *newmask;
1871 	int ret = 0;
1872 
1873 	rcu_read_lock();
1874 
1875 	/* Insert the mask as a temporary node to prevent concurrent creation
1876 	 * of a mask with the same key. Any concurrent lookup with the same key
1877 	 * will return -EAGAIN because the mask's refcnt is zero.
1878 	 */
1879 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1880 						       &mask->ht_node,
1881 						       mask_ht_params);
1882 	if (!fnew->mask) {
1883 		rcu_read_unlock();
1884 
1885 		if (fold) {
1886 			ret = -EINVAL;
1887 			goto errout_cleanup;
1888 		}
1889 
1890 		newmask = fl_create_new_mask(head, mask);
1891 		if (IS_ERR(newmask)) {
1892 			ret = PTR_ERR(newmask);
1893 			goto errout_cleanup;
1894 		}
1895 
1896 		fnew->mask = newmask;
1897 		return 0;
1898 	} else if (IS_ERR(fnew->mask)) {
1899 		ret = PTR_ERR(fnew->mask);
1900 	} else if (fold && fold->mask != fnew->mask) {
1901 		ret = -EINVAL;
1902 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1903 		/* Mask was deleted concurrently, try again */
1904 		ret = -EAGAIN;
1905 	}
1906 	rcu_read_unlock();
1907 	return ret;
1908 
1909 errout_cleanup:
1910 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1911 			       mask_ht_params);
1912 	return ret;
1913 }
1914 
1915 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1916 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1917 			unsigned long base, struct nlattr **tb,
1918 			struct nlattr *est, bool ovr,
1919 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1920 			struct netlink_ext_ack *extack)
1921 {
1922 	int err;
1923 
1924 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1925 				extack);
1926 	if (err < 0)
1927 		return err;
1928 
1929 	if (tb[TCA_FLOWER_CLASSID]) {
1930 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1931 		if (!rtnl_held)
1932 			rtnl_lock();
1933 		tcf_bind_filter(tp, &f->res, base);
1934 		if (!rtnl_held)
1935 			rtnl_unlock();
1936 	}
1937 
1938 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1939 	if (err)
1940 		return err;
1941 
1942 	fl_mask_update_range(mask);
1943 	fl_set_masked_key(&f->mkey, &f->key, mask);
1944 
1945 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1946 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1947 		return -EINVAL;
1948 	}
1949 
1950 	return 0;
1951 }
1952 
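/* Insert the new filter into its mask's hashtable. A duplicate key is
 * tolerated only when overwriting an old filter (fold != NULL); *in_ht
 * tells the caller whether the insertion actually took place.
 */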
1953 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1954 			       struct cls_fl_filter *fold,
1955 			       bool *in_ht)
1956 {
1957 	struct fl_flow_mask *mask = fnew->mask;
1958 	int err;
1959 
1960 	err = rhashtable_lookup_insert_fast(&mask->ht,
1961 					    &fnew->ht_node,
1962 					    mask->filter_ht_params);
1963 	if (err) {
1964 		*in_ht = false;
1965 		/* It is okay if a filter with the same key already exists
1966 		 * when overwriting.
1967 		 */
1968 		return fold && err == -EEXIST ? 0 : err;
1969 	}
1970 
1971 	*in_ht = true;
1972 	return 0;
1973 }
1974 
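/* Add or replace a filter: parse TCA_OPTIONS, build key and mask, assign
 * a (possibly shared) mask, offload to hardware unless skip_hw is set and
 * finally publish the filter under tp->lock, either replacing fold in
 * place or allocating a new handle from the IDR.
 */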
1975 static int fl_change(struct net *net, struct sk_buff *in_skb,
1976 		     struct tcf_proto *tp, unsigned long base,
1977 		     u32 handle, struct nlattr **tca,
1978 		     void **arg, bool ovr, bool rtnl_held,
1979 		     struct netlink_ext_ack *extack)
1980 {
1981 	struct cls_fl_head *head = fl_head_dereference(tp);
1982 	struct cls_fl_filter *fold = *arg;
1983 	struct cls_fl_filter *fnew;
1984 	struct fl_flow_mask *mask;
1985 	struct nlattr **tb;
1986 	bool in_ht;
1987 	int err;
1988 
1989 	if (!tca[TCA_OPTIONS]) {
1990 		err = -EINVAL;
1991 		goto errout_fold;
1992 	}
1993 
1994 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1995 	if (!mask) {
1996 		err = -ENOBUFS;
1997 		goto errout_fold;
1998 	}
1999 
2000 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2001 	if (!tb) {
2002 		err = -ENOBUFS;
2003 		goto errout_mask_alloc;
2004 	}
2005 
2006 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2007 					  tca[TCA_OPTIONS], fl_policy, NULL);
2008 	if (err < 0)
2009 		goto errout_tb;
2010 
2011 	if (fold && handle && fold->handle != handle) {
2012 		err = -EINVAL;
2013 		goto errout_tb;
2014 	}
2015 
2016 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2017 	if (!fnew) {
2018 		err = -ENOBUFS;
2019 		goto errout_tb;
2020 	}
2021 	INIT_LIST_HEAD(&fnew->hw_list);
2022 	refcount_set(&fnew->refcnt, 1);
2023 
2024 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2025 	if (err < 0)
2026 		goto errout;
2027 
2028 	if (tb[TCA_FLOWER_FLAGS]) {
2029 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2030 
2031 		if (!tc_flags_valid(fnew->flags)) {
2032 			err = -EINVAL;
2033 			goto errout;
2034 		}
2035 	}
2036 
2037 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
2038 			   tp->chain->tmplt_priv, rtnl_held, extack);
2039 	if (err)
2040 		goto errout;
2041 
2042 	err = fl_check_assign_mask(head, fnew, fold, mask);
2043 	if (err)
2044 		goto errout;
2045 
2046 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2047 	if (err)
2048 		goto errout_mask;
2049 
2050 	if (!tc_skip_hw(fnew->flags)) {
2051 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2052 		if (err)
2053 			goto errout_ht;
2054 	}
2055 
2056 	if (!tc_in_hw(fnew->flags))
2057 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2058 
2059 	spin_lock(&tp->lock);
2060 
2061 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2062 	 * up the proto again or create a new one, if necessary.
2063 	 */
2064 	if (tp->deleting) {
2065 		err = -EAGAIN;
2066 		goto errout_hw;
2067 	}
2068 
2069 	if (fold) {
2070 		/* Fold filter was deleted concurrently. Retry lookup. */
2071 		if (fold->deleted) {
2072 			err = -EAGAIN;
2073 			goto errout_hw;
2074 		}
2075 
2076 		fnew->handle = handle;
2077 
2078 		if (!in_ht) {
2079 			struct rhashtable_params params =
2080 				fnew->mask->filter_ht_params;
2081 
2082 			err = rhashtable_insert_fast(&fnew->mask->ht,
2083 						     &fnew->ht_node,
2084 						     params);
2085 			if (err)
2086 				goto errout_hw;
2087 			in_ht = true;
2088 		}
2089 
2090 		refcount_inc(&fnew->refcnt);
2091 		rhashtable_remove_fast(&fold->mask->ht,
2092 				       &fold->ht_node,
2093 				       fold->mask->filter_ht_params);
2094 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2095 		list_replace_rcu(&fold->list, &fnew->list);
2096 		fold->deleted = true;
2097 
2098 		spin_unlock(&tp->lock);
2099 
2100 		fl_mask_put(head, fold->mask);
2101 		if (!tc_skip_hw(fold->flags))
2102 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2103 		tcf_unbind_filter(tp, &fold->res);
2104 		/* Caller holds reference to fold, so refcnt is always > 0
2105 		 * after this.
2106 		 */
2107 		refcount_dec(&fold->refcnt);
2108 		__fl_put(fold);
2109 	} else {
2110 		if (handle) {
2111 			/* user specified a handle and no filter with it exists yet */
2112 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2113 					    handle, GFP_ATOMIC);
2114 
2115 			/* Filter with specified handle was concurrently
2116 			 * inserted after initial check in cls_api. This is not
2117 			 * necessarily an error if NLM_F_EXCL is not set in
2118 			 * message flags. Returning EAGAIN will cause cls_api to
2119 			 * try to update concurrently inserted rule.
2120 			 */
2121 			if (err == -ENOSPC)
2122 				err = -EAGAIN;
2123 		} else {
2124 			handle = 1;
2125 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2126 					    INT_MAX, GFP_ATOMIC);
2127 		}
2128 		if (err)
2129 			goto errout_hw;
2130 
2131 		refcount_inc(&fnew->refcnt);
2132 		fnew->handle = handle;
2133 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2134 		spin_unlock(&tp->lock);
2135 	}
2136 
2137 	*arg = fnew;
2138 
2139 	kfree(tb);
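	/* The temporary lookup mask is no longer needed; fnew->mask holds its
	 * own reference to a shared or newly created mask.
	 */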
2140 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2141 	return 0;
2142 
2143 errout_ht:
2144 	spin_lock(&tp->lock);
2145 errout_hw:
2146 	fnew->deleted = true;
2147 	spin_unlock(&tp->lock);
2148 	if (!tc_skip_hw(fnew->flags))
2149 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2150 	if (in_ht)
2151 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2152 				       fnew->mask->filter_ht_params);
2153 errout_mask:
2154 	fl_mask_put(head, fnew->mask);
2155 errout:
2156 	__fl_put(fnew);
2157 errout_tb:
2158 	kfree(tb);
2159 errout_mask_alloc:
2160 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2161 errout_fold:
2162 	if (fold)
2163 		__fl_put(fold);
2164 	return err;
2165 }
2166 
2167 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2168 		     bool rtnl_held, struct netlink_ext_ack *extack)
2169 {
2170 	struct cls_fl_head *head = fl_head_dereference(tp);
2171 	struct cls_fl_filter *f = arg;
2172 	bool last_on_mask;
2173 	int err = 0;
2174 
2175 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2176 	*last = list_empty(&head->masks);
2177 	__fl_put(f);
2178 
2179 	return err;
2180 }
2181 
2182 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2183 		    bool rtnl_held)
2184 {
2185 	struct cls_fl_head *head = fl_head_dereference(tp);
2186 	unsigned long id = arg->cookie, tmp;
2187 	struct cls_fl_filter *f;
2188 
2189 	arg->count = arg->skip;
2190 
2191 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2192 		/* don't return filters that are being deleted */
2193 		if (!refcount_inc_not_zero(&f->refcnt))
2194 			continue;
2195 		if (arg->fn(tp, f, arg) < 0) {
2196 			__fl_put(f);
2197 			arg->stop = 1;
2198 			break;
2199 		}
2200 		__fl_put(f);
2201 		arg->count++;
2202 	}
2203 	arg->cookie = id;
2204 }
2205 
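/* Resume iteration over head->hw_filters after *f (or from the start when
 * f is NULL) and return the next filter with a reference taken, or NULL.
 * Deleted filters are skipped when installing new offloads (add == true).
 */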
2206 static struct cls_fl_filter *
2207 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2208 {
2209 	struct cls_fl_head *head = fl_head_dereference(tp);
2210 
2211 	spin_lock(&tp->lock);
2212 	if (list_empty(&head->hw_filters)) {
2213 		spin_unlock(&tp->lock);
2214 		return NULL;
2215 	}
2216 
2217 	if (!f)
2218 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2219 			       hw_list);
2220 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2221 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2222 			spin_unlock(&tp->lock);
2223 			return f;
2224 		}
2225 	}
2226 
2227 	spin_unlock(&tp->lock);
2228 	return NULL;
2229 }
2230 
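/* Replay all filters currently on the hw_filters list to a single offload
 * callback, either installing (add) or removing (!add) them.
 */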
2231 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2232 			void *cb_priv, struct netlink_ext_ack *extack)
2233 {
2234 	struct tcf_block *block = tp->chain->block;
2235 	struct flow_cls_offload cls_flower = {};
2236 	struct cls_fl_filter *f = NULL;
2237 	int err;
2238 
2239 	/* hw_filters list can only be changed by hw offload functions after
2240 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2241 	 * iterating it.
2242 	 */
2243 	ASSERT_RTNL();
2244 
2245 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2246 		cls_flower.rule =
2247 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2248 		if (!cls_flower.rule) {
2249 			__fl_put(f);
2250 			return -ENOMEM;
2251 		}
2252 
2253 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2254 					   extack);
2255 		cls_flower.command = add ?
2256 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2257 		cls_flower.cookie = (unsigned long)f;
2258 		cls_flower.rule->match.dissector = &f->mask->dissector;
2259 		cls_flower.rule->match.mask = &f->mask->key;
2260 		cls_flower.rule->match.key = &f->mkey;
2261 
2262 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2263 		if (err) {
2264 			kfree(cls_flower.rule);
2265 			if (tc_skip_sw(f->flags)) {
2266 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2267 				__fl_put(f);
2268 				return err;
2269 			}
2270 			goto next_flow;
2271 		}
2272 
2273 		cls_flower.classid = f->res.classid;
2274 
2275 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2276 					    TC_SETUP_CLSFLOWER, &cls_flower,
2277 					    cb_priv, &f->flags,
2278 					    &f->in_hw_count);
2279 		tc_cleanup_flow_action(&cls_flower.rule->action);
2280 		kfree(cls_flower.rule);
2281 
2282 		if (err) {
2283 			__fl_put(f);
2284 			return err;
2285 		}
2286 next_flow:
2287 		__fl_put(f);
2288 	}
2289 
2290 	return 0;
2291 }
2292 
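/* hw_add/hw_del callbacks: maintain the tp->lock-protected hw_filters
 * list that tracks which filters are actually present in hardware.
 */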
2293 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2294 {
2295 	struct flow_cls_offload *cls_flower = type_data;
2296 	struct cls_fl_filter *f =
2297 		(struct cls_fl_filter *) cls_flower->cookie;
2298 	struct cls_fl_head *head = fl_head_dereference(tp);
2299 
2300 	spin_lock(&tp->lock);
2301 	list_add(&f->hw_list, &head->hw_filters);
2302 	spin_unlock(&tp->lock);
2303 }
2304 
2305 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2306 {
2307 	struct flow_cls_offload *cls_flower = type_data;
2308 	struct cls_fl_filter *f =
2309 		(struct cls_fl_filter *) cls_flower->cookie;
2310 
2311 	spin_lock(&tp->lock);
2312 	if (!list_empty(&f->hw_list))
2313 		list_del_init(&f->hw_list);
2314 	spin_unlock(&tp->lock);
2315 }
2316 
2317 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2318 			      struct fl_flow_tmplt *tmplt)
2319 {
2320 	struct flow_cls_offload cls_flower = {};
2321 	struct tcf_block *block = chain->block;
2322 
2323 	cls_flower.rule = flow_rule_alloc(0);
2324 	if (!cls_flower.rule)
2325 		return -ENOMEM;
2326 
2327 	cls_flower.common.chain_index = chain->index;
2328 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2329 	cls_flower.cookie = (unsigned long) tmplt;
2330 	cls_flower.rule->match.dissector = &tmplt->dissector;
2331 	cls_flower.rule->match.mask = &tmplt->mask;
2332 	cls_flower.rule->match.key = &tmplt->dummy_key;
2333 
2334 	/* We don't care if a driver (any of them) fails to handle this
2335 	 * call. It serves only as a hint to the driver.
2336 	 */
2337 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2338 	kfree(cls_flower.rule);
2339 
2340 	return 0;
2341 }
2342 
2343 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2344 				struct fl_flow_tmplt *tmplt)
2345 {
2346 	struct flow_cls_offload cls_flower = {};
2347 	struct tcf_block *block = chain->block;
2348 
2349 	cls_flower.common.chain_index = chain->index;
2350 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2351 	cls_flower.cookie = (unsigned long) tmplt;
2352 
2353 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2354 }
2355 
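/* Create a chain template: parse the flower attributes into a dummy
 * key/mask, build a dissector for them and hint the hardware about the
 * fields that future filters on this chain will match on.
 */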
2356 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2357 			     struct nlattr **tca,
2358 			     struct netlink_ext_ack *extack)
2359 {
2360 	struct fl_flow_tmplt *tmplt;
2361 	struct nlattr **tb;
2362 	int err;
2363 
2364 	if (!tca[TCA_OPTIONS])
2365 		return ERR_PTR(-EINVAL);
2366 
2367 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2368 	if (!tb)
2369 		return ERR_PTR(-ENOBUFS);
2370 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2371 					  tca[TCA_OPTIONS], fl_policy, NULL);
2372 	if (err)
2373 		goto errout_tb;
2374 
2375 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2376 	if (!tmplt) {
2377 		err = -ENOMEM;
2378 		goto errout_tb;
2379 	}
2380 	tmplt->chain = chain;
2381 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2382 	if (err)
2383 		goto errout_tmplt;
2384 
2385 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2386 
2387 	err = fl_hw_create_tmplt(chain, tmplt);
2388 	if (err)
2389 		goto errout_tmplt;
2390 
2391 	kfree(tb);
2392 	return tmplt;
2393 
2394 errout_tmplt:
2395 	kfree(tmplt);
2396 errout_tb:
2397 	kfree(tb);
2398 	return ERR_PTR(err);
2399 }
2400 
2401 static void fl_tmplt_destroy(void *tmplt_priv)
2402 {
2403 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2404 
2405 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2406 	kfree(tmplt);
2407 }
2408 
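/* Dump a key/mask pair. Nothing is emitted when the mask is all zeroes,
 * and the mask attribute itself is skipped for TCA_FLOWER_UNSPEC.
 */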
2409 static int fl_dump_key_val(struct sk_buff *skb,
2410 			   void *val, int val_type,
2411 			   void *mask, int mask_type, int len)
2412 {
2413 	int err;
2414 
2415 	if (!memchr_inv(mask, 0, len))
2416 		return 0;
2417 	err = nla_put(skb, val_type, len, val);
2418 	if (err)
2419 		return err;
2420 	if (mask_type != TCA_FLOWER_UNSPEC) {
2421 		err = nla_put(skb, mask_type, len, mask);
2422 		if (err)
2423 			return err;
2424 	}
2425 	return 0;
2426 }
2427 
2428 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2429 				  struct fl_flow_key *mask)
2430 {
2431 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2432 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2433 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2434 			    sizeof(key->tp_range.tp_min.dst)) ||
2435 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2436 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2437 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2438 			    sizeof(key->tp_range.tp_max.dst)) ||
2439 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2440 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2441 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2442 			    sizeof(key->tp_range.tp_min.src)) ||
2443 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2444 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2445 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2446 			    sizeof(key->tp_range.tp_max.src)))
2447 		return -1;
2448 
2449 	return 0;
2450 }
2451 
2452 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2453 				    struct flow_dissector_key_mpls *mpls_key,
2454 				    struct flow_dissector_key_mpls *mpls_mask,
2455 				    u8 lse_index)
2456 {
2457 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2458 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2459 	int err;
2460 
2461 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2462 			 lse_index + 1);
2463 	if (err)
2464 		return err;
2465 
2466 	if (lse_mask->mpls_ttl) {
2467 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2468 				 lse_key->mpls_ttl);
2469 		if (err)
2470 			return err;
2471 	}
2472 	if (lse_mask->mpls_bos) {
2473 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2474 				 lse_key->mpls_bos);
2475 		if (err)
2476 			return err;
2477 	}
2478 	if (lse_mask->mpls_tc) {
2479 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2480 				 lse_key->mpls_tc);
2481 		if (err)
2482 			return err;
2483 	}
2484 	if (lse_mask->mpls_label) {
2485 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2486 				  lse_key->mpls_label);
2487 		if (err)
2488 			return err;
2489 	}
2490 
2491 	return 0;
2492 }
2493 
2494 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2495 				 struct flow_dissector_key_mpls *mpls_key,
2496 				 struct flow_dissector_key_mpls *mpls_mask)
2497 {
2498 	struct nlattr *opts;
2499 	struct nlattr *lse;
2500 	u8 lse_index;
2501 	int err;
2502 
2503 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2504 	if (!opts)
2505 		return -EMSGSIZE;
2506 
2507 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2508 		if (!(mpls_mask->used_lses & 1 << lse_index))
2509 			continue;
2510 
2511 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2512 		if (!lse) {
2513 			err = -EMSGSIZE;
2514 			goto err_opts;
2515 		}
2516 
2517 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2518 					       lse_index);
2519 		if (err)
2520 			goto err_opts_lse;
2521 		nla_nest_end(skb, lse);
2522 	}
2523 	nla_nest_end(skb, opts);
2524 
2525 	return 0;
2526 
2527 err_opts_lse:
2528 	nla_nest_cancel(skb, lse);
2529 err_opts:
2530 	nla_nest_cancel(skb, opts);
2531 
2532 	return err;
2533 }
2534 
2535 static int fl_dump_key_mpls(struct sk_buff *skb,
2536 			    struct flow_dissector_key_mpls *mpls_key,
2537 			    struct flow_dissector_key_mpls *mpls_mask)
2538 {
2539 	struct flow_dissector_mpls_lse *lse_mask;
2540 	struct flow_dissector_mpls_lse *lse_key;
2541 	int err;
2542 
2543 	if (!mpls_mask->used_lses)
2544 		return 0;
2545 
2546 	lse_mask = &mpls_mask->ls[0];
2547 	lse_key = &mpls_key->ls[0];
2548 
2549 	/* For backward compatibility, don't use the MPLS nested attributes if
2550 	 * the rule can be expressed using the old attributes.
2551 	 */
2552 	if (mpls_mask->used_lses & ~1 ||
2553 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2554 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2555 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2556 
2557 	if (lse_mask->mpls_ttl) {
2558 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2559 				 lse_key->mpls_ttl);
2560 		if (err)
2561 			return err;
2562 	}
2563 	if (lse_mask->mpls_tc) {
2564 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2565 				 lse_key->mpls_tc);
2566 		if (err)
2567 			return err;
2568 	}
2569 	if (lse_mask->mpls_label) {
2570 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2571 				  lse_key->mpls_label);
2572 		if (err)
2573 			return err;
2574 	}
2575 	if (lse_mask->mpls_bos) {
2576 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2577 				 lse_key->mpls_bos);
2578 		if (err)
2579 			return err;
2580 	}
2581 	return 0;
2582 }
2583 
2584 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2585 			  struct flow_dissector_key_ip *key,
2586 			  struct flow_dissector_key_ip *mask)
2587 {
2588 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2589 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2590 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2591 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2592 
2593 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2594 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2595 		return -1;
2596 
2597 	return 0;
2598 }
2599 
2600 static int fl_dump_key_vlan(struct sk_buff *skb,
2601 			    int vlan_id_key, int vlan_prio_key,
2602 			    struct flow_dissector_key_vlan *vlan_key,
2603 			    struct flow_dissector_key_vlan *vlan_mask)
2604 {
2605 	int err;
2606 
2607 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2608 		return 0;
2609 	if (vlan_mask->vlan_id) {
2610 		err = nla_put_u16(skb, vlan_id_key,
2611 				  vlan_key->vlan_id);
2612 		if (err)
2613 			return err;
2614 	}
2615 	if (vlan_mask->vlan_priority) {
2616 		err = nla_put_u8(skb, vlan_prio_key,
2617 				 vlan_key->vlan_priority);
2618 		if (err)
2619 			return err;
2620 	}
2621 	return 0;
2622 }
2623 
2624 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2625 			    u32 *flower_key, u32 *flower_mask,
2626 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2627 {
2628 	if (dissector_mask & dissector_flag_bit) {
2629 		*flower_mask |= flower_flag_bit;
2630 		if (dissector_key & dissector_flag_bit)
2631 			*flower_key |= flower_flag_bit;
2632 	}
2633 }
2634 
2635 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2636 {
2637 	u32 key, mask;
2638 	__be32 _key, _mask;
2639 	int err;
2640 
2641 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2642 		return 0;
2643 
2644 	key = 0;
2645 	mask = 0;
2646 
2647 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2648 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2649 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2650 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2651 			FLOW_DIS_FIRST_FRAG);
2652 
2653 	_key = cpu_to_be32(key);
2654 	_mask = cpu_to_be32(mask);
2655 
2656 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2657 	if (err)
2658 		return err;
2659 
2660 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2661 }
2662 
2663 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2664 				  struct flow_dissector_key_enc_opts *enc_opts)
2665 {
2666 	struct geneve_opt *opt;
2667 	struct nlattr *nest;
2668 	int opt_off = 0;
2669 
2670 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2671 	if (!nest)
2672 		goto nla_put_failure;
2673 
2674 	while (enc_opts->len > opt_off) {
2675 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2676 
2677 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2678 				 opt->opt_class))
2679 			goto nla_put_failure;
2680 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2681 			       opt->type))
2682 			goto nla_put_failure;
2683 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2684 			    opt->length * 4, opt->opt_data))
2685 			goto nla_put_failure;
2686 
2687 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2688 	}
2689 	nla_nest_end(skb, nest);
2690 	return 0;
2691 
2692 nla_put_failure:
2693 	nla_nest_cancel(skb, nest);
2694 	return -EMSGSIZE;
2695 }
2696 
2697 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2698 				 struct flow_dissector_key_enc_opts *enc_opts)
2699 {
2700 	struct vxlan_metadata *md;
2701 	struct nlattr *nest;
2702 
2703 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2704 	if (!nest)
2705 		goto nla_put_failure;
2706 
2707 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2708 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2709 		goto nla_put_failure;
2710 
2711 	nla_nest_end(skb, nest);
2712 	return 0;
2713 
2714 nla_put_failure:
2715 	nla_nest_cancel(skb, nest);
2716 	return -EMSGSIZE;
2717 }
2718 
2719 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2720 				  struct flow_dissector_key_enc_opts *enc_opts)
2721 {
2722 	struct erspan_metadata *md;
2723 	struct nlattr *nest;
2724 
2725 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2726 	if (!nest)
2727 		goto nla_put_failure;
2728 
2729 	md = (struct erspan_metadata *)&enc_opts->data[0];
2730 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2731 		goto nla_put_failure;
2732 
2733 	if (md->version == 1 &&
2734 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2735 		goto nla_put_failure;
2736 
2737 	if (md->version == 2 &&
2738 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2739 			md->u.md2.dir) ||
2740 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2741 			get_hwid(&md->u.md2))))
2742 		goto nla_put_failure;
2743 
2744 	nla_nest_end(skb, nest);
2745 	return 0;
2746 
2747 nla_put_failure:
2748 	nla_nest_cancel(skb, nest);
2749 	return -EMSGSIZE;
2750 }
2751 
2752 static int fl_dump_key_ct(struct sk_buff *skb,
2753 			  struct flow_dissector_key_ct *key,
2754 			  struct flow_dissector_key_ct *mask)
2755 {
2756 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2757 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2758 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2759 			    sizeof(key->ct_state)))
2760 		goto nla_put_failure;
2761 
2762 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2763 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2764 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2765 			    sizeof(key->ct_zone)))
2766 		goto nla_put_failure;
2767 
2768 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2769 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2770 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2771 			    sizeof(key->ct_mark)))
2772 		goto nla_put_failure;
2773 
2774 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2775 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2776 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2777 			    sizeof(key->ct_labels)))
2778 		goto nla_put_failure;
2779 
2780 	return 0;
2781 
2782 nla_put_failure:
2783 	return -EMSGSIZE;
2784 }
2785 
2786 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2787 			       struct flow_dissector_key_enc_opts *enc_opts)
2788 {
2789 	struct nlattr *nest;
2790 	int err;
2791 
2792 	if (!enc_opts->len)
2793 		return 0;
2794 
2795 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2796 	if (!nest)
2797 		goto nla_put_failure;
2798 
2799 	switch (enc_opts->dst_opt_type) {
2800 	case TUNNEL_GENEVE_OPT:
2801 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2802 		if (err)
2803 			goto nla_put_failure;
2804 		break;
2805 	case TUNNEL_VXLAN_OPT:
2806 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2807 		if (err)
2808 			goto nla_put_failure;
2809 		break;
2810 	case TUNNEL_ERSPAN_OPT:
2811 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2812 		if (err)
2813 			goto nla_put_failure;
2814 		break;
2815 	default:
2816 		goto nla_put_failure;
2817 	}
2818 	nla_nest_end(skb, nest);
2819 	return 0;
2820 
2821 nla_put_failure:
2822 	nla_nest_cancel(skb, nest);
2823 	return -EMSGSIZE;
2824 }
2825 
2826 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2827 			       struct flow_dissector_key_enc_opts *key_opts,
2828 			       struct flow_dissector_key_enc_opts *msk_opts)
2829 {
2830 	int err;
2831 
2832 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2833 	if (err)
2834 		return err;
2835 
2836 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2837 }
2838 
2839 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2840 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2841 {
2842 	if (mask->meta.ingress_ifindex) {
2843 		struct net_device *dev;
2844 
2845 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2846 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2847 			goto nla_put_failure;
2848 	}
2849 
2850 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2851 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2852 			    sizeof(key->eth.dst)) ||
2853 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2854 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2855 			    sizeof(key->eth.src)) ||
2856 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2857 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2858 			    sizeof(key->basic.n_proto)))
2859 		goto nla_put_failure;
2860 
2861 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2862 		goto nla_put_failure;
2863 
2864 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2865 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2866 		goto nla_put_failure;
2867 
2868 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2869 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2870 			     &key->cvlan, &mask->cvlan) ||
2871 	    (mask->cvlan.vlan_tpid &&
2872 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2873 			  key->cvlan.vlan_tpid)))
2874 		goto nla_put_failure;
2875 
2876 	if (mask->basic.n_proto) {
2877 		if (mask->cvlan.vlan_tpid) {
2878 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2879 					 key->basic.n_proto))
2880 				goto nla_put_failure;
2881 		} else if (mask->vlan.vlan_tpid) {
2882 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2883 					 key->basic.n_proto))
2884 				goto nla_put_failure;
2885 		}
2886 	}
2887 
2888 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2889 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2890 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2891 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2892 			    sizeof(key->basic.ip_proto)) ||
2893 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2894 		goto nla_put_failure;
2895 
2896 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2897 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2898 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2899 			     sizeof(key->ipv4.src)) ||
2900 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2901 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2902 			     sizeof(key->ipv4.dst))))
2903 		goto nla_put_failure;
2904 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2905 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2906 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2907 				  sizeof(key->ipv6.src)) ||
2908 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2909 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2910 				  sizeof(key->ipv6.dst))))
2911 		goto nla_put_failure;
2912 
2913 	if (key->basic.ip_proto == IPPROTO_TCP &&
2914 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2915 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2916 			     sizeof(key->tp.src)) ||
2917 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2918 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2919 			     sizeof(key->tp.dst)) ||
2920 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2921 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2922 			     sizeof(key->tcp.flags))))
2923 		goto nla_put_failure;
2924 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2925 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2926 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2927 				  sizeof(key->tp.src)) ||
2928 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2929 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2930 				  sizeof(key->tp.dst))))
2931 		goto nla_put_failure;
2932 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2933 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2934 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2935 				  sizeof(key->tp.src)) ||
2936 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2937 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2938 				  sizeof(key->tp.dst))))
2939 		goto nla_put_failure;
2940 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2941 		 key->basic.ip_proto == IPPROTO_ICMP &&
2942 		 (fl_dump_key_val(skb, &key->icmp.type,
2943 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2944 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2945 				  sizeof(key->icmp.type)) ||
2946 		  fl_dump_key_val(skb, &key->icmp.code,
2947 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2948 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2949 				  sizeof(key->icmp.code))))
2950 		goto nla_put_failure;
2951 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2952 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2953 		 (fl_dump_key_val(skb, &key->icmp.type,
2954 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2955 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2956 				  sizeof(key->icmp.type)) ||
2957 		  fl_dump_key_val(skb, &key->icmp.code,
2958 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2959 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2960 				  sizeof(key->icmp.code))))
2961 		goto nla_put_failure;
2962 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2963 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2964 		 (fl_dump_key_val(skb, &key->arp.sip,
2965 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2966 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2967 				  sizeof(key->arp.sip)) ||
2968 		  fl_dump_key_val(skb, &key->arp.tip,
2969 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2970 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2971 				  sizeof(key->arp.tip)) ||
2972 		  fl_dump_key_val(skb, &key->arp.op,
2973 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2974 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2975 				  sizeof(key->arp.op)) ||
2976 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2977 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2978 				  sizeof(key->arp.sha)) ||
2979 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2980 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2981 				  sizeof(key->arp.tha))))
2982 		goto nla_put_failure;
2983 
2984 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2985 	     key->basic.ip_proto == IPPROTO_UDP ||
2986 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2987 	     fl_dump_key_port_range(skb, key, mask))
2988 		goto nla_put_failure;
2989 
2990 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2991 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2992 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2993 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2994 			    sizeof(key->enc_ipv4.src)) ||
2995 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2996 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2997 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2998 			     sizeof(key->enc_ipv4.dst))))
2999 		goto nla_put_failure;
3000 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3001 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3002 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3003 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3004 			    sizeof(key->enc_ipv6.src)) ||
3005 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3006 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3007 				 &mask->enc_ipv6.dst,
3008 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3009 			    sizeof(key->enc_ipv6.dst))))
3010 		goto nla_put_failure;
3011 
3012 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3013 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3014 			    sizeof(key->enc_key_id)) ||
3015 	    fl_dump_key_val(skb, &key->enc_tp.src,
3016 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3017 			    &mask->enc_tp.src,
3018 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3019 			    sizeof(key->enc_tp.src)) ||
3020 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3021 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3022 			    &mask->enc_tp.dst,
3023 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3024 			    sizeof(key->enc_tp.dst)) ||
3025 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3026 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3027 		goto nla_put_failure;
3028 
3029 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3030 		goto nla_put_failure;
3031 
3032 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3033 		goto nla_put_failure;
3034 
3035 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3036 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3037 			     sizeof(key->hash.hash)))
3038 		goto nla_put_failure;
3039 
3040 	return 0;
3041 
3042 nla_put_failure:
3043 	return -EMSGSIZE;
3044 }
3045 
3046 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3047 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3048 {
3049 	struct cls_fl_filter *f = fh;
3050 	struct nlattr *nest;
3051 	struct fl_flow_key *key, *mask;
3052 	bool skip_hw;
3053 
3054 	if (!f)
3055 		return skb->len;
3056 
3057 	t->tcm_handle = f->handle;
3058 
3059 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3060 	if (!nest)
3061 		goto nla_put_failure;
3062 
3063 	spin_lock(&tp->lock);
3064 
3065 	if (f->res.classid &&
3066 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3067 		goto nla_put_failure_locked;
3068 
3069 	key = &f->key;
3070 	mask = &f->mask->key;
3071 	skip_hw = tc_skip_hw(f->flags);
3072 
3073 	if (fl_dump_key(skb, net, key, mask))
3074 		goto nla_put_failure_locked;
3075 
3076 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3077 		goto nla_put_failure_locked;
3078 
3079 	spin_unlock(&tp->lock);
3080 
3081 	if (!skip_hw)
3082 		fl_hw_update_stats(tp, f, rtnl_held);
3083 
3084 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3085 		goto nla_put_failure;
3086 
3087 	if (tcf_exts_dump(skb, &f->exts))
3088 		goto nla_put_failure;
3089 
3090 	nla_nest_end(skb, nest);
3091 
3092 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3093 		goto nla_put_failure;
3094 
3095 	return skb->len;
3096 
3097 nla_put_failure_locked:
3098 	spin_unlock(&tp->lock);
3099 nla_put_failure:
3100 	nla_nest_cancel(skb, nest);
3101 	return -1;
3102 }
3103 
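/* Terse dump: emit only the handle, flags and terse action info, skipping
 * the full key/mask to keep large dumps cheap.
 */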
3104 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3105 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3106 {
3107 	struct cls_fl_filter *f = fh;
3108 	struct nlattr *nest;
3109 	bool skip_hw;
3110 
3111 	if (!f)
3112 		return skb->len;
3113 
3114 	t->tcm_handle = f->handle;
3115 
3116 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3117 	if (!nest)
3118 		goto nla_put_failure;
3119 
3120 	spin_lock(&tp->lock);
3121 
3122 	skip_hw = tc_skip_hw(f->flags);
3123 
3124 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3125 		goto nla_put_failure_locked;
3126 
3127 	spin_unlock(&tp->lock);
3128 
3129 	if (!skip_hw)
3130 		fl_hw_update_stats(tp, f, rtnl_held);
3131 
3132 	if (tcf_exts_terse_dump(skb, &f->exts))
3133 		goto nla_put_failure;
3134 
3135 	nla_nest_end(skb, nest);
3136 
3137 	return skb->len;
3138 
3139 nla_put_failure_locked:
3140 	spin_unlock(&tp->lock);
3141 nla_put_failure:
3142 	nla_nest_cancel(skb, nest);
3143 	return -1;
3144 }
3145 
3146 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3147 {
3148 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3149 	struct fl_flow_key *key, *mask;
3150 	struct nlattr *nest;
3151 
3152 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3153 	if (!nest)
3154 		goto nla_put_failure;
3155 
3156 	key = &tmplt->dummy_key;
3157 	mask = &tmplt->mask;
3158 
3159 	if (fl_dump_key(skb, net, key, mask))
3160 		goto nla_put_failure;
3161 
3162 	nla_nest_end(skb, nest);
3163 
3164 	return skb->len;
3165 
3166 nla_put_failure:
3167 	nla_nest_cancel(skb, nest);
3168 	return -EMSGSIZE;
3169 }
3170 
3171 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3172 			  unsigned long base)
3173 {
3174 	struct cls_fl_filter *f = fh;
3175 
3176 	if (f && f->res.classid == classid) {
3177 		if (cl)
3178 			__tcf_bind_filter(q, &f->res, base);
3179 		else
3180 			__tcf_unbind_filter(q, &f->res);
3181 	}
3182 }
3183 
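/* Report whether this classifier instance holds no filters. Setting
 * tp->deleting under tp->lock makes a concurrent fl_change() back off
 * with -EAGAIN.
 */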
3184 static bool fl_delete_empty(struct tcf_proto *tp)
3185 {
3186 	struct cls_fl_head *head = fl_head_dereference(tp);
3187 
3188 	spin_lock(&tp->lock);
3189 	tp->deleting = idr_is_empty(&head->handle_idr);
3190 	spin_unlock(&tp->lock);
3191 
3192 	return tp->deleting;
3193 }
3194 
3195 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3196 	.kind		= "flower",
3197 	.classify	= fl_classify,
3198 	.init		= fl_init,
3199 	.destroy	= fl_destroy,
3200 	.get		= fl_get,
3201 	.put		= fl_put,
3202 	.change		= fl_change,
3203 	.delete		= fl_delete,
3204 	.delete_empty	= fl_delete_empty,
3205 	.walk		= fl_walk,
3206 	.reoffload	= fl_reoffload,
3207 	.hw_add		= fl_hw_add,
3208 	.hw_del		= fl_hw_del,
3209 	.dump		= fl_dump,
3210 	.terse_dump	= fl_terse_dump,
3211 	.bind_class	= fl_bind_class,
3212 	.tmplt_create	= fl_tmplt_create,
3213 	.tmplt_destroy	= fl_tmplt_destroy,
3214 	.tmplt_dump	= fl_tmplt_dump,
3215 	.owner		= THIS_MODULE,
3216 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3217 };
3218 
3219 static int __init cls_fl_init(void)
3220 {
3221 	return register_tcf_proto_ops(&cls_fl_ops);
3222 }
3223 
3224 static void __exit cls_fl_exit(void)
3225 {
3226 	unregister_tcf_proto_ops(&cls_fl_ops);
3227 }
3228 
3229 module_init(cls_fl_init);
3230 module_exit(cls_fl_exit);
3231 
3232 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3233 MODULE_DESCRIPTION("Flower classifier");
3234 MODULE_LICENSE("GPL v2");
3235