1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27 
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30 
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 
33 struct fl_flow_key {
34 	struct flow_dissector_key_meta meta;
35 	struct flow_dissector_key_control control;
36 	struct flow_dissector_key_control enc_control;
37 	struct flow_dissector_key_basic basic;
38 	struct flow_dissector_key_eth_addrs eth;
39 	struct flow_dissector_key_vlan vlan;
40 	struct flow_dissector_key_vlan cvlan;
41 	union {
42 		struct flow_dissector_key_ipv4_addrs ipv4;
43 		struct flow_dissector_key_ipv6_addrs ipv6;
44 	};
45 	struct flow_dissector_key_ports tp;
46 	struct flow_dissector_key_icmp icmp;
47 	struct flow_dissector_key_arp arp;
48 	struct flow_dissector_key_keyid enc_key_id;
49 	union {
50 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 	};
53 	struct flow_dissector_key_ports enc_tp;
54 	struct flow_dissector_key_mpls mpls;
55 	struct flow_dissector_key_tcp tcp;
56 	struct flow_dissector_key_ip ip;
57 	struct flow_dissector_key_ip enc_ip;
58 	struct flow_dissector_key_enc_opts enc_opts;
59 	union {
60 		struct flow_dissector_key_ports tp;
61 		struct {
62 			struct flow_dissector_key_ports tp_min;
63 			struct flow_dissector_key_ports tp_max;
64 		};
65 	} tp_range;
66 	struct flow_dissector_key_ct ct;
67 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
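
/*
 * Descriptive note (added for clarity): the key is padded to a multiple of
 * sizeof(long) so that it can be read and combined as an array of longs.
 * fl_set_masked_key() below ANDs the packet key with the mask one long at a
 * time, and each mask's rhashtable then hashes only the byte range the mask
 * actually covers.  Conceptually, for every long i inside that range:
 *
 *	masked_key[i] = packet_key[i] & mask[i];
 *
 * so two packets that differ only in unmasked bits hash to the same filter.
 */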
68 
69 struct fl_flow_mask_range {
70 	unsigned short int start;
71 	unsigned short int end;
72 };
73 
74 struct fl_flow_mask {
75 	struct fl_flow_key key;
76 	struct fl_flow_mask_range range;
77 	u32 flags;
78 	struct rhash_head ht_node;
79 	struct rhashtable ht;
80 	struct rhashtable_params filter_ht_params;
81 	struct flow_dissector dissector;
82 	struct list_head filters;
83 	struct rcu_work rwork;
84 	struct list_head list;
85 	refcount_t refcnt;
86 };
87 
88 struct fl_flow_tmplt {
89 	struct fl_flow_key dummy_key;
90 	struct fl_flow_key mask;
91 	struct flow_dissector dissector;
92 	struct tcf_chain *chain;
93 };
94 
95 struct cls_fl_head {
96 	struct rhashtable ht;
97 	spinlock_t masks_lock; /* Protect masks list */
98 	struct list_head masks;
99 	struct list_head hw_filters;
100 	struct rcu_work rwork;
101 	struct idr handle_idr;
102 };
103 
104 struct cls_fl_filter {
105 	struct fl_flow_mask *mask;
106 	struct rhash_head ht_node;
107 	struct fl_flow_key mkey;
108 	struct tcf_exts exts;
109 	struct tcf_result res;
110 	struct fl_flow_key key;
111 	struct list_head list;
112 	struct list_head hw_list;
113 	u32 handle;
114 	u32 flags;
115 	u32 in_hw_count;
116 	struct rcu_work rwork;
117 	struct net_device *hw_dev;
118 	/* Flower classifier is unlocked, which means that its reference counter
119 	 * can be changed concurrently without any kind of external
120 	 * synchronization. Use atomic reference counter to be concurrency-safe.
121 	 */
122 	refcount_t refcnt;
123 	bool deleted;
124 };
125 
126 static const struct rhashtable_params mask_ht_params = {
127 	.key_offset = offsetof(struct fl_flow_mask, key),
128 	.key_len = sizeof(struct fl_flow_key),
129 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
130 	.automatic_shrinking = true,
131 };
132 
133 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
134 {
135 	return mask->range.end - mask->range.start;
136 }
137 
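/*
 * Descriptive note (added for clarity): compute the smallest long-aligned
 * window that covers every non-zero byte of the mask; only this window is
 * hashed and compared.  Worked example (assuming a 64-bit machine): if the
 * first non-zero mask byte is at offset 5 and the last at offset 18, the
 * range becomes [rounddown(5, 8), roundup(19, 8)) = [0, 24), i.e. three
 * longs.
 */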
138 static void fl_mask_update_range(struct fl_flow_mask *mask)
139 {
140 	const u8 *bytes = (const u8 *) &mask->key;
141 	size_t size = sizeof(mask->key);
142 	size_t i, first = 0, last;
143 
144 	for (i = 0; i < size; i++) {
145 		if (bytes[i]) {
146 			first = i;
147 			break;
148 		}
149 	}
150 	last = first;
151 	for (i = size - 1; i != first; i--) {
152 		if (bytes[i]) {
153 			last = i;
154 			break;
155 		}
156 	}
157 	mask->range.start = rounddown(first, sizeof(long));
158 	mask->range.end = roundup(last + 1, sizeof(long));
159 }
160 
161 static void *fl_key_get_start(struct fl_flow_key *key,
162 			      const struct fl_flow_mask *mask)
163 {
164 	return (u8 *) key + mask->range.start;
165 }
166 
167 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
168 			      struct fl_flow_mask *mask)
169 {
170 	const long *lkey = fl_key_get_start(key, mask);
171 	const long *lmask = fl_key_get_start(&mask->key, mask);
172 	long *lmkey = fl_key_get_start(mkey, mask);
173 	int i;
174 
175 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
176 		*lmkey++ = *lkey++ & *lmask++;
177 }
178 
179 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
180 			       struct fl_flow_mask *mask)
181 {
182 	const long *lmask = fl_key_get_start(&mask->key, mask);
183 	const long *ltmplt;
184 	int i;
185 
186 	if (!tmplt)
187 		return true;
188 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
189 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
190 		if (~*ltmplt++ & *lmask++)
191 			return false;
192 	}
193 	return true;
194 }
195 
196 static void fl_clear_masked_range(struct fl_flow_key *key,
197 				  struct fl_flow_mask *mask)
198 {
199 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
200 }
201 
202 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
203 				  struct fl_flow_key *key,
204 				  struct fl_flow_key *mkey)
205 {
206 	__be16 min_mask, max_mask, min_val, max_val;
207 
208 	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
209 	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
210 	min_val = htons(filter->key.tp_range.tp_min.dst);
211 	max_val = htons(filter->key.tp_range.tp_max.dst);
212 
213 	if (min_mask && max_mask) {
214 		if (htons(key->tp_range.tp.dst) < min_val ||
215 		    htons(key->tp_range.tp.dst) > max_val)
216 			return false;
217 
218 		/* skb does not have min and max values */
219 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
220 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
221 	}
222 	return true;
223 }
224 
225 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
226 				  struct fl_flow_key *key,
227 				  struct fl_flow_key *mkey)
228 {
229 	__be16 min_mask, max_mask, min_val, max_val;
230 
231 	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
232 	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
233 	min_val = htons(filter->key.tp_range.tp_min.src);
234 	max_val = htons(filter->key.tp_range.tp_max.src);
235 
236 	if (min_mask && max_mask) {
237 		if (htons(key->tp_range.tp.src) < min_val ||
238 		    htons(key->tp_range.tp.src) > max_val)
239 			return false;
240 
241 		/* skb does not have min and max values */
242 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
243 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
244 	}
245 	return true;
246 }
247 
248 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
249 					 struct fl_flow_key *mkey)
250 {
251 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
252 				      mask->filter_ht_params);
253 }
254 
255 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
256 					     struct fl_flow_key *mkey,
257 					     struct fl_flow_key *key)
258 {
259 	struct cls_fl_filter *filter, *f;
260 
261 	list_for_each_entry_rcu(filter, &mask->filters, list) {
262 		if (!fl_range_port_dst_cmp(filter, key, mkey))
263 			continue;
264 
265 		if (!fl_range_port_src_cmp(filter, key, mkey))
266 			continue;
267 
268 		f = __fl_lookup(mask, mkey);
269 		if (f)
270 			return f;
271 	}
272 	return NULL;
273 }
274 
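/*
 * Descriptive note (added for clarity): per-mask lookup masks the packet key
 * down to the bytes this mask cares about and looks the result up in the
 * mask's own hash table.  Port-range filters cannot be expressed as a pure
 * hash match (a packet carries a single port, not a min/max pair), so masks
 * flagged with TCA_FLOWER_MASK_FLAGS_RANGE fall back to fl_lookup_range(),
 * which walks the filter list, checks the ranges and only then does the
 * hash lookup.
 */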
275 static noinline_for_stack
276 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
277 {
278 	struct fl_flow_key mkey;
279 
280 	fl_set_masked_key(&mkey, key, mask);
281 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
282 		return fl_lookup_range(mask, &mkey, key);
283 
284 	return __fl_lookup(mask, &mkey);
285 }
286 
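/*
 * Descriptive note (added for clarity): translation table from conntrack
 * state (enum ip_conntrack_info) to the TCA_FLOWER_KEY_CT_FLAGS_* bits
 * exposed to userspace; it is handed to skb_flow_dissect_ct() in
 * fl_classify() below.
 */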
287 static u16 fl_ct_info_to_flower_map[] = {
288 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
289 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
290 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
291 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
292 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
293 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
294 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
296 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
297 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
298 };
299 
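/*
 * Descriptive note (added for clarity): software fast path.  For every mask
 * installed on this classifier, dissect only the parts of the packet that
 * the mask's flow_dissector asks for, mask the result and look it up.  The
 * first match whose filter is not skip_sw wins and its actions are executed
 * via tcf_exts_exec().
 */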
300 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
301 		       struct tcf_result *res)
302 {
303 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
304 	struct fl_flow_key skb_key;
305 	struct fl_flow_mask *mask;
306 	struct cls_fl_filter *f;
307 
308 	list_for_each_entry_rcu(mask, &head->masks, list) {
309 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
310 		fl_clear_masked_range(&skb_key, mask);
311 
312 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
313 		/* skb_flow_dissect() does not set n_proto in case of an unknown
314 		 * protocol, so set it here instead.
315 		 */
316 		skb_key.basic.n_proto = skb->protocol;
317 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
318 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
319 				    fl_ct_info_to_flower_map,
320 				    ARRAY_SIZE(fl_ct_info_to_flower_map));
321 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
322 
323 		f = fl_mask_lookup(mask, &skb_key);
324 		if (f && !tc_skip_sw(f->flags)) {
325 			*res = f->res;
326 			return tcf_exts_exec(skb, &f->exts, res);
327 		}
328 	}
329 	return -1;
330 }
331 
332 static int fl_init(struct tcf_proto *tp)
333 {
334 	struct cls_fl_head *head;
335 
336 	head = kzalloc(sizeof(*head), GFP_KERNEL);
337 	if (!head)
338 		return -ENOBUFS;
339 
340 	spin_lock_init(&head->masks_lock);
341 	INIT_LIST_HEAD_RCU(&head->masks);
342 	INIT_LIST_HEAD(&head->hw_filters);
343 	rcu_assign_pointer(tp->root, head);
344 	idr_init(&head->handle_idr);
345 
346 	return rhashtable_init(&head->ht, &mask_ht_params);
347 }
348 
349 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
350 {
351 	/* temporary masks don't have their filters list and ht initialized */
352 	if (mask_init_done) {
353 		WARN_ON(!list_empty(&mask->filters));
354 		rhashtable_destroy(&mask->ht);
355 	}
356 	kfree(mask);
357 }
358 
359 static void fl_mask_free_work(struct work_struct *work)
360 {
361 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
362 						 struct fl_flow_mask, rwork);
363 
364 	fl_mask_free(mask, true);
365 }
366 
367 static void fl_uninit_mask_free_work(struct work_struct *work)
368 {
369 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
370 						 struct fl_flow_mask, rwork);
371 
372 	fl_mask_free(mask, false);
373 }
374 
375 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
376 {
377 	if (!refcount_dec_and_test(&mask->refcnt))
378 		return false;
379 
380 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
381 
382 	spin_lock(&head->masks_lock);
383 	list_del_rcu(&mask->list);
384 	spin_unlock(&head->masks_lock);
385 
386 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
387 
388 	return true;
389 }
390 
391 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
392 {
393 	/* Flower classifier only changes root pointer during init and destroy.
394 	 * Users must obtain reference to tcf_proto instance before calling its
395 	 * API, so tp->root pointer is protected from concurrent call to
396 	 * fl_destroy() by reference counting.
397 	 */
398 	return rcu_dereference_raw(tp->root);
399 }
400 
401 static void __fl_destroy_filter(struct cls_fl_filter *f)
402 {
403 	tcf_exts_destroy(&f->exts);
404 	tcf_exts_put_net(&f->exts);
405 	kfree(f);
406 }
407 
408 static void fl_destroy_filter_work(struct work_struct *work)
409 {
410 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
411 					struct cls_fl_filter, rwork);
412 
413 	__fl_destroy_filter(f);
414 }
415 
416 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
417 				 bool rtnl_held, struct netlink_ext_ack *extack)
418 {
419 	struct tcf_block *block = tp->chain->block;
420 	struct flow_cls_offload cls_flower = {};
421 
422 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
423 	cls_flower.command = FLOW_CLS_DESTROY;
424 	cls_flower.cookie = (unsigned long) f;
425 
426 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
427 			    &f->flags, &f->in_hw_count, rtnl_held);
428 
429 }
430 
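/*
 * Descriptive note (added for clarity): install (or update) the filter in
 * hardware.  Build a FLOW_CLS_REPLACE command that points at the filter's
 * dissector, mask and masked key, translate the tc actions into a
 * flow_action array and offer it to the block's offload callbacks via
 * tc_setup_cb_add().  A filter marked skip_sw must end up in hardware
 * (TCA_CLS_FLAGS_IN_HW), otherwise the replace fails with -EINVAL; without
 * skip_sw, a filter that no driver accepted simply stays in software.
 */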
431 static int fl_hw_replace_filter(struct tcf_proto *tp,
432 				struct cls_fl_filter *f, bool rtnl_held,
433 				struct netlink_ext_ack *extack)
434 {
435 	struct tcf_block *block = tp->chain->block;
436 	struct flow_cls_offload cls_flower = {};
437 	bool skip_sw = tc_skip_sw(f->flags);
438 	int err = 0;
439 
440 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
441 	if (!cls_flower.rule)
442 		return -ENOMEM;
443 
444 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
445 	cls_flower.command = FLOW_CLS_REPLACE;
446 	cls_flower.cookie = (unsigned long) f;
447 	cls_flower.rule->match.dissector = &f->mask->dissector;
448 	cls_flower.rule->match.mask = &f->mask->key;
449 	cls_flower.rule->match.key = &f->mkey;
450 	cls_flower.classid = f->res.classid;
451 
452 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
453 	if (err) {
454 		kfree(cls_flower.rule);
455 		if (skip_sw) {
456 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
457 			return err;
458 		}
459 		return 0;
460 	}
461 
462 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
463 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
464 	tc_cleanup_flow_action(&cls_flower.rule->action);
465 	kfree(cls_flower.rule);
466 
467 	if (err) {
468 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
469 		return err;
470 	}
471 
472 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
473 		return -EINVAL;
474 
475 	return 0;
476 }
477 
478 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
479 			       bool rtnl_held)
480 {
481 	struct tcf_block *block = tp->chain->block;
482 	struct flow_cls_offload cls_flower = {};
483 
484 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
485 	cls_flower.command = FLOW_CLS_STATS;
486 	cls_flower.cookie = (unsigned long) f;
487 	cls_flower.classid = f->res.classid;
488 
489 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
490 			 rtnl_held);
491 
492 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
493 			      cls_flower.stats.pkts,
494 			      cls_flower.stats.drops,
495 			      cls_flower.stats.lastused,
496 			      cls_flower.stats.used_hw_stats,
497 			      cls_flower.stats.used_hw_stats_valid);
498 }
499 
500 static void __fl_put(struct cls_fl_filter *f)
501 {
502 	if (!refcount_dec_and_test(&f->refcnt))
503 		return;
504 
505 	if (tcf_exts_get_net(&f->exts))
506 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
507 	else
508 		__fl_destroy_filter(f);
509 }
510 
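/*
 * Descriptive note (added for clarity): look a filter up by handle and take
 * a reference.  The idr is searched under RCU, and refcount_inc_not_zero()
 * guards against racing with the final put: a filter whose refcount has
 * already dropped to zero is treated as not found.
 */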
511 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
512 {
513 	struct cls_fl_filter *f;
514 
515 	rcu_read_lock();
516 	f = idr_find(&head->handle_idr, handle);
517 	if (f && !refcount_inc_not_zero(&f->refcnt))
518 		f = NULL;
519 	rcu_read_unlock();
520 
521 	return f;
522 }
523 
524 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
525 		       bool *last, bool rtnl_held,
526 		       struct netlink_ext_ack *extack)
527 {
528 	struct cls_fl_head *head = fl_head_dereference(tp);
529 
530 	*last = false;
531 
532 	spin_lock(&tp->lock);
533 	if (f->deleted) {
534 		spin_unlock(&tp->lock);
535 		return -ENOENT;
536 	}
537 
538 	f->deleted = true;
539 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
540 			       f->mask->filter_ht_params);
541 	idr_remove(&head->handle_idr, f->handle);
542 	list_del_rcu(&f->list);
543 	spin_unlock(&tp->lock);
544 
545 	*last = fl_mask_put(head, f->mask);
546 	if (!tc_skip_hw(f->flags))
547 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
548 	tcf_unbind_filter(tp, &f->res);
549 	__fl_put(f);
550 
551 	return 0;
552 }
553 
554 static void fl_destroy_sleepable(struct work_struct *work)
555 {
556 	struct cls_fl_head *head = container_of(to_rcu_work(work),
557 						struct cls_fl_head,
558 						rwork);
559 
560 	rhashtable_destroy(&head->ht);
561 	kfree(head);
562 	module_put(THIS_MODULE);
563 }
564 
565 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
566 		       struct netlink_ext_ack *extack)
567 {
568 	struct cls_fl_head *head = fl_head_dereference(tp);
569 	struct fl_flow_mask *mask, *next_mask;
570 	struct cls_fl_filter *f, *next;
571 	bool last;
572 
573 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
574 		list_for_each_entry_safe(f, next, &mask->filters, list) {
575 			__fl_delete(tp, f, &last, rtnl_held, extack);
576 			if (last)
577 				break;
578 		}
579 	}
580 	idr_destroy(&head->handle_idr);
581 
582 	__module_get(THIS_MODULE);
583 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
584 }
585 
586 static void fl_put(struct tcf_proto *tp, void *arg)
587 {
588 	struct cls_fl_filter *f = arg;
589 
590 	__fl_put(f);
591 }
592 
593 static void *fl_get(struct tcf_proto *tp, u32 handle)
594 {
595 	struct cls_fl_head *head = fl_head_dereference(tp);
596 
597 	return __fl_get(head, handle);
598 }
599 
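/*
 * Descriptive note (added for clarity): netlink policy for the TCA_FLOWER_*
 * attributes.  These are what the "tc ... flower" front end emits; for
 * example (illustrative command line, device name and exact iproute2
 * syntax assumed):
 *
 *	tc filter add dev eth0 ingress protocol ip pref 1 flower \
 *		ip_proto tcp dst_port 80 skip_sw action drop
 *
 * would roughly populate TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO,
 * TCA_FLOWER_KEY_TCP_DST and TCA_FLOWER_FLAGS, which fl_set_key() below
 * turns into a key/mask pair.
 */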
600 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
601 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
602 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
603 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
604 					    .len = IFNAMSIZ },
605 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
606 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
607 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
608 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
609 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
610 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
611 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
612 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
613 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
614 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
615 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
616 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
617 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
618 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
619 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
620 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
621 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
622 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
623 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
624 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
625 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
626 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
628 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
629 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
630 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
631 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
632 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
633 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
634 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
635 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
641 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
642 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
644 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
645 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
646 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
647 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
648 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
649 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
650 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
651 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
652 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
653 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
654 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
655 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
656 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
657 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
658 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
659 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
660 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
661 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
662 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
664 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
665 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
666 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
667 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
672 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
673 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
674 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
679 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
681 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
684 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
685 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
686 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
687 	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
688 	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
689 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
690 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
691 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
692 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
693 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
694 					    .len = 128 / BITS_PER_BYTE },
695 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
696 					    .len = 128 / BITS_PER_BYTE },
697 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
698 };
699 
700 static const struct nla_policy
701 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
702 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
703 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
704 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
705 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
706 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
707 };
708 
709 static const struct nla_policy
710 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
711 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
712 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
713 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
714 						       .len = 128 },
715 };
716 
717 static const struct nla_policy
718 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
719 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
720 };
721 
722 static const struct nla_policy
723 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
724 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
725 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
726 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
727 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
728 };
729 
730 static const struct nla_policy
731 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
732 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
733 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
734 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
735 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
736 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
737 };
738 
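/*
 * Descriptive note (added for clarity): copy one netlink attribute into the
 * key and its companion attribute into the mask.  If the value attribute is
 * absent nothing is touched; if only the value is given (no mask attribute,
 * or the key has no mask attribute at all), the mask is set to all-ones,
 * i.e. an exact match on the value.
 */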
739 static void fl_set_key_val(struct nlattr **tb,
740 			   void *val, int val_type,
741 			   void *mask, int mask_type, int len)
742 {
743 	if (!tb[val_type])
744 		return;
745 	nla_memcpy(val, tb[val_type], len);
746 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
747 		memset(mask, 0xff, len);
748 	else
749 		nla_memcpy(mask, tb[mask_type], len);
750 }
751 
752 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
753 				 struct fl_flow_key *mask,
754 				 struct netlink_ext_ack *extack)
755 {
756 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
757 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
758 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
759 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
760 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
761 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
762 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
763 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
764 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
765 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
766 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
767 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
768 
769 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
770 	    htons(key->tp_range.tp_max.dst) <=
771 	    htons(key->tp_range.tp_min.dst)) {
772 		NL_SET_ERR_MSG_ATTR(extack,
773 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
774 				    "Invalid destination port range (min must be strictly smaller than max)");
775 		return -EINVAL;
776 	}
777 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
778 	    htons(key->tp_range.tp_max.src) <=
779 	    htons(key->tp_range.tp_min.src)) {
780 		NL_SET_ERR_MSG_ATTR(extack,
781 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
782 				    "Invalid source port range (min must be strictly smaller than max)");
783 		return -EINVAL;
784 	}
785 
786 	return 0;
787 }
788 
789 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
790 			       struct flow_dissector_key_mpls *key_val,
791 			       struct flow_dissector_key_mpls *key_mask,
792 			       struct netlink_ext_ack *extack)
793 {
794 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
795 	struct flow_dissector_mpls_lse *lse_mask;
796 	struct flow_dissector_mpls_lse *lse_val;
797 	u8 lse_index;
798 	u8 depth;
799 	int err;
800 
801 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
802 			       mpls_stack_entry_policy, extack);
803 	if (err < 0)
804 		return err;
805 
806 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
807 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
808 		return -EINVAL;
809 	}
810 
811 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
812 
813 	/* LSE depth starts at 1, for consistency with terminology used by
814 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
815 	 */
816 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
817 		NL_SET_ERR_MSG_ATTR(extack,
818 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
819 				    "Invalid MPLS depth");
820 		return -EINVAL;
821 	}
822 	lse_index = depth - 1;
823 
824 	dissector_set_mpls_lse(key_val, lse_index);
825 	dissector_set_mpls_lse(key_mask, lse_index);
826 
827 	lse_val = &key_val->ls[lse_index];
828 	lse_mask = &key_mask->ls[lse_index];
829 
830 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
831 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
832 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
833 	}
834 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
835 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
836 
837 		if (bos & ~MPLS_BOS_MASK) {
838 			NL_SET_ERR_MSG_ATTR(extack,
839 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
840 					    "Bottom Of Stack (BOS) must be 0 or 1");
841 			return -EINVAL;
842 		}
843 		lse_val->mpls_bos = bos;
844 		lse_mask->mpls_bos = MPLS_BOS_MASK;
845 	}
846 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
847 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
848 
849 		if (tc & ~MPLS_TC_MASK) {
850 			NL_SET_ERR_MSG_ATTR(extack,
851 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
852 					    "Traffic Class (TC) must be between 0 and 7");
853 			return -EINVAL;
854 		}
855 		lse_val->mpls_tc = tc;
856 		lse_mask->mpls_tc = MPLS_TC_MASK;
857 	}
858 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
859 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
860 
861 		if (label & ~MPLS_LABEL_MASK) {
862 			NL_SET_ERR_MSG_ATTR(extack,
863 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
864 					    "Label must be between 0 and 1048575");
865 			return -EINVAL;
866 		}
867 		lse_val->mpls_label = label;
868 		lse_mask->mpls_label = MPLS_LABEL_MASK;
869 	}
870 
871 	return 0;
872 }
873 
874 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
875 				struct flow_dissector_key_mpls *key_val,
876 				struct flow_dissector_key_mpls *key_mask,
877 				struct netlink_ext_ack *extack)
878 {
879 	struct nlattr *nla_lse;
880 	int rem;
881 	int err;
882 
883 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
884 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
885 				    "NLA_F_NESTED is missing");
886 		return -EINVAL;
887 	}
888 
889 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
890 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
891 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
892 					    "Invalid MPLS option type");
893 			return -EINVAL;
894 		}
895 
896 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
897 		if (err < 0)
898 			return err;
899 	}
900 	if (rem) {
901 		NL_SET_ERR_MSG(extack,
902 			       "Bytes leftover after parsing MPLS options");
903 		return -EINVAL;
904 	}
905 
906 	return 0;
907 }
908 
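/*
 * Descriptive note (added for clarity): MPLS matching has two userspace
 * interfaces.  The original flat TCA_FLOWER_KEY_MPLS_{TTL,BOS,TC,LABEL}
 * attributes can only describe the topmost label stack entry (ls[0]); the
 * nested TCA_FLOWER_KEY_MPLS_OPTS attribute carries one LSE per depth.
 * The two forms are mutually exclusive.
 */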
909 static int fl_set_key_mpls(struct nlattr **tb,
910 			   struct flow_dissector_key_mpls *key_val,
911 			   struct flow_dissector_key_mpls *key_mask,
912 			   struct netlink_ext_ack *extack)
913 {
914 	struct flow_dissector_mpls_lse *lse_mask;
915 	struct flow_dissector_mpls_lse *lse_val;
916 
917 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
918 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
919 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
920 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
921 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
922 			NL_SET_ERR_MSG_ATTR(extack,
923 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
924 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
925 			return -EBADMSG;
926 		}
927 
928 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
929 					    key_val, key_mask, extack);
930 	}
931 
932 	lse_val = &key_val->ls[0];
933 	lse_mask = &key_mask->ls[0];
934 
935 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
936 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
937 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
938 		dissector_set_mpls_lse(key_val, 0);
939 		dissector_set_mpls_lse(key_mask, 0);
940 	}
941 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
942 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
943 
944 		if (bos & ~MPLS_BOS_MASK) {
945 			NL_SET_ERR_MSG_ATTR(extack,
946 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
947 					    "Bottom Of Stack (BOS) must be 0 or 1");
948 			return -EINVAL;
949 		}
950 		lse_val->mpls_bos = bos;
951 		lse_mask->mpls_bos = MPLS_BOS_MASK;
952 		dissector_set_mpls_lse(key_val, 0);
953 		dissector_set_mpls_lse(key_mask, 0);
954 	}
955 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
956 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
957 
958 		if (tc & ~MPLS_TC_MASK) {
959 			NL_SET_ERR_MSG_ATTR(extack,
960 					    tb[TCA_FLOWER_KEY_MPLS_TC],
961 					    "Traffic Class (TC) must be between 0 and 7");
962 			return -EINVAL;
963 		}
964 		lse_val->mpls_tc = tc;
965 		lse_mask->mpls_tc = MPLS_TC_MASK;
966 		dissector_set_mpls_lse(key_val, 0);
967 		dissector_set_mpls_lse(key_mask, 0);
968 	}
969 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
970 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
971 
972 		if (label & ~MPLS_LABEL_MASK) {
973 			NL_SET_ERR_MSG_ATTR(extack,
974 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
975 					    "Label must be between 0 and 1048575");
976 			return -EINVAL;
977 		}
978 		lse_val->mpls_label = label;
979 		lse_mask->mpls_label = MPLS_LABEL_MASK;
980 		dissector_set_mpls_lse(key_val, 0);
981 		dissector_set_mpls_lse(key_mask, 0);
982 	}
983 	return 0;
984 }
985 
986 static void fl_set_key_vlan(struct nlattr **tb,
987 			    __be16 ethertype,
988 			    int vlan_id_key, int vlan_prio_key,
989 			    struct flow_dissector_key_vlan *key_val,
990 			    struct flow_dissector_key_vlan *key_mask)
991 {
992 #define VLAN_PRIORITY_MASK	0x7
993 
994 	if (tb[vlan_id_key]) {
995 		key_val->vlan_id =
996 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
997 		key_mask->vlan_id = VLAN_VID_MASK;
998 	}
999 	if (tb[vlan_prio_key]) {
1000 		key_val->vlan_priority =
1001 			nla_get_u8(tb[vlan_prio_key]) &
1002 			VLAN_PRIORITY_MASK;
1003 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1004 	}
1005 	key_val->vlan_tpid = ethertype;
1006 	key_mask->vlan_tpid = cpu_to_be16(~0);
1007 }
1008 
1009 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1010 			    u32 *dissector_key, u32 *dissector_mask,
1011 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1012 {
1013 	if (flower_mask & flower_flag_bit) {
1014 		*dissector_mask |= dissector_flag_bit;
1015 		if (flower_key & flower_flag_bit)
1016 			*dissector_key |= dissector_flag_bit;
1017 	}
1018 }
1019 
1020 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1021 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1022 {
1023 	u32 key, mask;
1024 
1025 	/* mask is mandatory for flags */
1026 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1027 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1028 		return -EINVAL;
1029 	}
1030 
1031 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
1032 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1033 
1034 	*flags_key  = 0;
1035 	*flags_mask = 0;
1036 
1037 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1038 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1039 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1040 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1041 			FLOW_DIS_FIRST_FRAG);
1042 
1043 	return 0;
1044 }
1045 
1046 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1047 			  struct flow_dissector_key_ip *key,
1048 			  struct flow_dissector_key_ip *mask)
1049 {
1050 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1051 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1052 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1053 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1054 
1055 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1056 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1057 }
1058 
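/*
 * Descriptive note (added for clarity): parse one geneve option.  The same
 * helper is called twice from fl_set_enc_opt(): once for the key and once
 * for the mask.  "depth" is 0 when no mask attribute was supplied at all,
 * in which case the pre-filled all-ones option is kept, meaning an exact
 * match.  On success, the number of bytes consumed in enc_opts.data is
 * returned so the caller can keep key and mask lengths in sync.
 */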
1059 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1060 			     int depth, int option_len,
1061 			     struct netlink_ext_ack *extack)
1062 {
1063 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1064 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1065 	struct geneve_opt *opt;
1066 	int err, data_len = 0;
1067 
1068 	if (option_len > sizeof(struct geneve_opt))
1069 		data_len = option_len - sizeof(struct geneve_opt);
1070 
1071 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1072 	memset(opt, 0xff, option_len);
1073 	opt->length = data_len / 4;
1074 	opt->r1 = 0;
1075 	opt->r2 = 0;
1076 	opt->r3 = 0;
1077 
1078 	/* If no mask has been provided we assume an exact match. */
1079 	if (!depth)
1080 		return sizeof(struct geneve_opt) + data_len;
1081 
1082 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1083 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1084 		return -EINVAL;
1085 	}
1086 
1087 	err = nla_parse_nested_deprecated(tb,
1088 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1089 					  nla, geneve_opt_policy, extack);
1090 	if (err < 0)
1091 		return err;
1092 
1093 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1094 	 * fields from the key.
1095 	 */
1096 	if (!option_len &&
1097 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1098 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1099 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1100 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1101 		return -EINVAL;
1102 	}
1103 
1104 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1105 	 * for the mask.
1106 	 */
1107 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1108 		int new_len = key->enc_opts.len;
1109 
1110 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1111 		data_len = nla_len(data);
1112 		if (data_len < 4) {
1113 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1114 			return -ERANGE;
1115 		}
1116 		if (data_len % 4) {
1117 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1118 			return -ERANGE;
1119 		}
1120 
1121 		new_len += sizeof(struct geneve_opt) + data_len;
1122 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1123 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1124 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1125 			return -ERANGE;
1126 		}
1127 		opt->length = data_len / 4;
1128 		memcpy(opt->opt_data, nla_data(data), data_len);
1129 	}
1130 
1131 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1132 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1133 		opt->opt_class = nla_get_be16(class);
1134 	}
1135 
1136 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1137 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1138 		opt->type = nla_get_u8(type);
1139 	}
1140 
1141 	return sizeof(struct geneve_opt) + data_len;
1142 }
1143 
1144 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1145 			    int depth, int option_len,
1146 			    struct netlink_ext_ack *extack)
1147 {
1148 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1149 	struct vxlan_metadata *md;
1150 	int err;
1151 
1152 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1153 	memset(md, 0xff, sizeof(*md));
1154 
1155 	if (!depth)
1156 		return sizeof(*md);
1157 
1158 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1159 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1160 		return -EINVAL;
1161 	}
1162 
1163 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1164 			       vxlan_opt_policy, extack);
1165 	if (err < 0)
1166 		return err;
1167 
1168 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1169 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1170 		return -EINVAL;
1171 	}
1172 
1173 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
1174 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1175 
1176 	return sizeof(*md);
1177 }
1178 
1179 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1180 			     int depth, int option_len,
1181 			     struct netlink_ext_ack *extack)
1182 {
1183 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1184 	struct erspan_metadata *md;
1185 	int err;
1186 
1187 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1188 	memset(md, 0xff, sizeof(*md));
1189 	md->version = 1;
1190 
1191 	if (!depth)
1192 		return sizeof(*md);
1193 
1194 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1195 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1196 		return -EINVAL;
1197 	}
1198 
1199 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1200 			       erspan_opt_policy, extack);
1201 	if (err < 0)
1202 		return err;
1203 
1204 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1205 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1206 		return -EINVAL;
1207 	}
1208 
1209 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1210 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1211 
1212 	if (md->version == 1) {
1213 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1214 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1215 			return -EINVAL;
1216 		}
1217 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1218 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1219 			md->u.index = nla_get_be32(nla);
1220 		}
1221 	} else if (md->version == 2) {
1222 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1223 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1224 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1225 			return -EINVAL;
1226 		}
1227 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1228 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1229 			md->u.md2.dir = nla_get_u8(nla);
1230 		}
1231 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1232 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1233 			set_hwid(&md->u.md2, nla_get_u8(nla));
1234 		}
1235 	} else {
1236 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1237 		return -EINVAL;
1238 	}
1239 
1240 	return sizeof(*md);
1241 }
1242 
1243 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1244 			  struct fl_flow_key *mask,
1245 			  struct netlink_ext_ack *extack)
1246 {
1247 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1248 	int err, option_len, key_depth, msk_depth = 0;
1249 
1250 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1251 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1252 					     enc_opts_policy, extack);
1253 	if (err)
1254 		return err;
1255 
1256 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1257 
1258 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1259 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1260 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1261 						     enc_opts_policy, extack);
1262 		if (err)
1263 			return err;
1264 
1265 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1266 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1267 	}
1268 
1269 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1270 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1271 		switch (nla_type(nla_opt_key)) {
1272 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1273 			if (key->enc_opts.dst_opt_type &&
1274 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1275 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1276 				return -EINVAL;
1277 			}
1278 			option_len = 0;
1279 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1280 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1281 						       key_depth, option_len,
1282 						       extack);
1283 			if (option_len < 0)
1284 				return option_len;
1285 
1286 			key->enc_opts.len += option_len;
1287 			/* At the same time we need to parse through the mask
1288 			 * in order to verify exact and mask attribute lengths.
1289 			 */
1290 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1291 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1292 						       msk_depth, option_len,
1293 						       extack);
1294 			if (option_len < 0)
1295 				return option_len;
1296 
1297 			mask->enc_opts.len += option_len;
1298 			if (key->enc_opts.len != mask->enc_opts.len) {
1299 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1300 				return -EINVAL;
1301 			}
1302 
1303 			if (msk_depth)
1304 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1305 			break;
1306 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1307 			if (key->enc_opts.dst_opt_type) {
1308 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1309 				return -EINVAL;
1310 			}
1311 			option_len = 0;
1312 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1313 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1314 						      key_depth, option_len,
1315 						      extack);
1316 			if (option_len < 0)
1317 				return option_len;
1318 
1319 			key->enc_opts.len += option_len;
1320 			/* At the same time we need to parse through the mask
1321 			 * in order to verify exact and mask attribute lengths.
1322 			 */
1323 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1324 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1325 						      msk_depth, option_len,
1326 						      extack);
1327 			if (option_len < 0)
1328 				return option_len;
1329 
1330 			mask->enc_opts.len += option_len;
1331 			if (key->enc_opts.len != mask->enc_opts.len) {
1332 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1333 				return -EINVAL;
1334 			}
1335 
1336 			if (msk_depth)
1337 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1338 			break;
1339 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1340 			if (key->enc_opts.dst_opt_type) {
1341 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1342 				return -EINVAL;
1343 			}
1344 			option_len = 0;
1345 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1346 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1347 						       key_depth, option_len,
1348 						       extack);
1349 			if (option_len < 0)
1350 				return option_len;
1351 
1352 			key->enc_opts.len += option_len;
1353 			/* At the same time we need to parse through the mask
1354 			 * in order to verify exact and mask attribute lengths.
1355 			 */
1356 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1357 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1358 						       msk_depth, option_len,
1359 						       extack);
1360 			if (option_len < 0)
1361 				return option_len;
1362 
1363 			mask->enc_opts.len += option_len;
1364 			if (key->enc_opts.len != mask->enc_opts.len) {
1365 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1366 				return -EINVAL;
1367 			}
1368 
1369 			if (msk_depth)
1370 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1371 			break;
1372 		default:
1373 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1374 			return -EINVAL;
1375 		}
1376 	}
1377 
1378 	return 0;
1379 }
1380 
1381 static int fl_set_key_ct(struct nlattr **tb,
1382 			 struct flow_dissector_key_ct *key,
1383 			 struct flow_dissector_key_ct *mask,
1384 			 struct netlink_ext_ack *extack)
1385 {
1386 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1387 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1388 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1389 			return -EOPNOTSUPP;
1390 		}
1391 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1392 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1393 			       sizeof(key->ct_state));
1394 	}
1395 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1396 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1397 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1398 			return -EOPNOTSUPP;
1399 		}
1400 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1401 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1402 			       sizeof(key->ct_zone));
1403 	}
1404 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1405 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1406 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1407 			return -EOPNOTSUPP;
1408 		}
1409 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1410 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1411 			       sizeof(key->ct_mark));
1412 	}
1413 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1414 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1415 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1416 			return -EOPNOTSUPP;
1417 		}
1418 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1419 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1420 			       sizeof(key->ct_labels));
1421 	}
1422 
1423 	return 0;
1424 }
1425 
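/*
 * Descriptive note (added for clarity): translate the TCA_FLOWER_*
 * attributes into a key/mask pair.  Higher layer keys (L4 ports, ICMP,
 * MPLS, ARP, ...) are only parsed when the corresponding lower layer value
 * (n_proto, ip_proto) selects them, so a filter cannot, for instance,
 * match TCP ports without ip_proto tcp.
 */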
1426 static int fl_set_key(struct net *net, struct nlattr **tb,
1427 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1428 		      struct netlink_ext_ack *extack)
1429 {
1430 	__be16 ethertype;
1431 	int ret = 0;
1432 
1433 	if (tb[TCA_FLOWER_INDEV]) {
1434 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1435 		if (err < 0)
1436 			return err;
1437 		key->meta.ingress_ifindex = err;
1438 		mask->meta.ingress_ifindex = 0xffffffff;
1439 	}
1440 
1441 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1442 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1443 		       sizeof(key->eth.dst));
1444 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1445 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1446 		       sizeof(key->eth.src));
1447 
1448 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1449 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1450 
1451 		if (eth_type_vlan(ethertype)) {
1452 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1453 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1454 					&mask->vlan);
1455 
1456 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1457 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1458 				if (eth_type_vlan(ethertype)) {
1459 					fl_set_key_vlan(tb, ethertype,
1460 							TCA_FLOWER_KEY_CVLAN_ID,
1461 							TCA_FLOWER_KEY_CVLAN_PRIO,
1462 							&key->cvlan, &mask->cvlan);
1463 					fl_set_key_val(tb, &key->basic.n_proto,
1464 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1465 						       &mask->basic.n_proto,
1466 						       TCA_FLOWER_UNSPEC,
1467 						       sizeof(key->basic.n_proto));
1468 				} else {
1469 					key->basic.n_proto = ethertype;
1470 					mask->basic.n_proto = cpu_to_be16(~0);
1471 				}
1472 			}
1473 		} else {
1474 			key->basic.n_proto = ethertype;
1475 			mask->basic.n_proto = cpu_to_be16(~0);
1476 		}
1477 	}
1478 
1479 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1480 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1481 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1482 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1483 			       sizeof(key->basic.ip_proto));
1484 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1485 	}
1486 
1487 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1488 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1489 		mask->control.addr_type = ~0;
1490 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1491 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1492 			       sizeof(key->ipv4.src));
1493 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1494 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1495 			       sizeof(key->ipv4.dst));
1496 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1497 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1498 		mask->control.addr_type = ~0;
1499 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1500 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1501 			       sizeof(key->ipv6.src));
1502 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1503 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1504 			       sizeof(key->ipv6.dst));
1505 	}
1506 
1507 	if (key->basic.ip_proto == IPPROTO_TCP) {
1508 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1509 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1510 			       sizeof(key->tp.src));
1511 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1512 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1513 			       sizeof(key->tp.dst));
1514 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1515 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1516 			       sizeof(key->tcp.flags));
1517 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1518 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1519 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1520 			       sizeof(key->tp.src));
1521 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1522 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1523 			       sizeof(key->tp.dst));
1524 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1525 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1526 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1527 			       sizeof(key->tp.src));
1528 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1529 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1530 			       sizeof(key->tp.dst));
1531 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1532 		   key->basic.ip_proto == IPPROTO_ICMP) {
1533 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1534 			       &mask->icmp.type,
1535 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1536 			       sizeof(key->icmp.type));
1537 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1538 			       &mask->icmp.code,
1539 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1540 			       sizeof(key->icmp.code));
1541 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1542 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1543 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1544 			       &mask->icmp.type,
1545 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1546 			       sizeof(key->icmp.type));
1547 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1548 			       &mask->icmp.code,
1549 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1550 			       sizeof(key->icmp.code));
1551 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1552 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1553 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1554 		if (ret)
1555 			return ret;
1556 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1557 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1558 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1559 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1560 			       sizeof(key->arp.sip));
1561 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1562 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1563 			       sizeof(key->arp.tip));
1564 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1565 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1566 			       sizeof(key->arp.op));
1567 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1568 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1569 			       sizeof(key->arp.sha));
1570 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1571 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1572 			       sizeof(key->arp.tha));
1573 	}
1574 
1575 	if (key->basic.ip_proto == IPPROTO_TCP ||
1576 	    key->basic.ip_proto == IPPROTO_UDP ||
1577 	    key->basic.ip_proto == IPPROTO_SCTP) {
1578 		ret = fl_set_key_port_range(tb, key, mask, extack);
1579 		if (ret)
1580 			return ret;
1581 	}
1582 
1583 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1584 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1585 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1586 		mask->enc_control.addr_type = ~0;
1587 		fl_set_key_val(tb, &key->enc_ipv4.src,
1588 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1589 			       &mask->enc_ipv4.src,
1590 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1591 			       sizeof(key->enc_ipv4.src));
1592 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1593 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1594 			       &mask->enc_ipv4.dst,
1595 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1596 			       sizeof(key->enc_ipv4.dst));
1597 	}
1598 
1599 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1600 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1601 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1602 		mask->enc_control.addr_type = ~0;
1603 		fl_set_key_val(tb, &key->enc_ipv6.src,
1604 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1605 			       &mask->enc_ipv6.src,
1606 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1607 			       sizeof(key->enc_ipv6.src));
1608 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1609 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1610 			       &mask->enc_ipv6.dst,
1611 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1612 			       sizeof(key->enc_ipv6.dst));
1613 	}
1614 
1615 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1616 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1617 		       sizeof(key->enc_key_id.keyid));
1618 
1619 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1620 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1621 		       sizeof(key->enc_tp.src));
1622 
1623 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1624 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1625 		       sizeof(key->enc_tp.dst));
1626 
1627 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1628 
1629 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1630 		ret = fl_set_enc_opt(tb, key, mask, extack);
1631 		if (ret)
1632 			return ret;
1633 	}
1634 
1635 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1636 	if (ret)
1637 		return ret;
1638 
1639 	if (tb[TCA_FLOWER_KEY_FLAGS])
1640 		ret = fl_set_key_flags(tb, &key->control.flags,
1641 				       &mask->control.flags, extack);
1642 
1643 	return ret;
1644 }
1645 
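/* Copy only the byte range of @src that is actually masked (as computed by
 * fl_mask_update_range()) into @dst, together with the range itself.
 */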
1646 static void fl_mask_copy(struct fl_flow_mask *dst,
1647 			 struct fl_flow_mask *src)
1648 {
1649 	const void *psrc = fl_key_get_start(&src->key, src);
1650 	void *pdst = fl_key_get_start(&dst->key, src);
1651 
1652 	memcpy(pdst, psrc, fl_mask_range(src));
1653 	dst->range = src->range;
1654 }
1655 
1656 static const struct rhashtable_params fl_ht_params = {
1657 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1658 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1659 	.automatic_shrinking = true,
1660 };
1661 
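/* Each mask owns a filter hashtable that hashes only the bytes the mask
 * covers: key_len and key_offset are derived from the mask's start/end range,
 * so lookups compare just the relevant portion of the masked key (mkey).
 */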
1662 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1663 {
1664 	mask->filter_ht_params = fl_ht_params;
1665 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1666 	mask->filter_ht_params.key_offset += mask->range.start;
1667 
1668 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1669 }
1670 
1671 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1672 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1673 
1674 #define FL_KEY_IS_MASKED(mask, member)						\
1675 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1676 		   0, FL_KEY_MEMBER_SIZE(member))				\
1677 
1678 #define FL_KEY_SET(keys, cnt, id, member)					\
1679 	do {									\
1680 		keys[cnt].key_id = id;						\
1681 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1682 		cnt++;								\
1683 	} while (0)
1684 
1685 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1686 	do {									\
1687 		if (FL_KEY_IS_MASKED(mask, member))				\
1688 			FL_KEY_SET(keys, cnt, id, member);			\
1689 	} while (0)
1690 
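/* Build the flow_dissector for this mask: CONTROL and BASIC are always
 * included, every other key is added only if at least one of its bytes is
 * non-zero in the mask (FL_KEY_IS_MASKED).
 */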
1691 static void fl_init_dissector(struct flow_dissector *dissector,
1692 			      struct fl_flow_key *mask)
1693 {
1694 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1695 	size_t cnt = 0;
1696 
1697 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1698 			     FLOW_DISSECTOR_KEY_META, meta);
1699 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1700 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1701 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1702 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1703 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1704 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1705 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1706 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1707 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1708 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1709 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1710 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1711 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1712 			     FLOW_DISSECTOR_KEY_IP, ip);
1713 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1714 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1715 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1716 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1717 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1718 			     FLOW_DISSECTOR_KEY_ARP, arp);
1719 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1720 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1721 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1722 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1723 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1724 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1725 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1726 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1727 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1728 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1729 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1730 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1731 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1732 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1733 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1734 			   enc_control);
1735 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1736 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1737 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1738 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1739 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1740 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1741 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1742 			     FLOW_DISSECTOR_KEY_CT, ct);
1743 
1744 	skb_flow_dissector_init(dissector, keys, cnt);
1745 }
1746 
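/* Allocate and initialize a new mask, replace the temporary node inserted by
 * fl_check_assign_mask() in head->ht with it and link it on head->masks.
 * Returns the new mask or an ERR_PTR() on failure.
 */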
1747 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1748 					       struct fl_flow_mask *mask)
1749 {
1750 	struct fl_flow_mask *newmask;
1751 	int err;
1752 
1753 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1754 	if (!newmask)
1755 		return ERR_PTR(-ENOMEM);
1756 
1757 	fl_mask_copy(newmask, mask);
1758 
1759 	if ((newmask->key.tp_range.tp_min.dst &&
1760 	     newmask->key.tp_range.tp_max.dst) ||
1761 	    (newmask->key.tp_range.tp_min.src &&
1762 	     newmask->key.tp_range.tp_max.src))
1763 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1764 
1765 	err = fl_init_mask_hashtable(newmask);
1766 	if (err)
1767 		goto errout_free;
1768 
1769 	fl_init_dissector(&newmask->dissector, &newmask->key);
1770 
1771 	INIT_LIST_HEAD_RCU(&newmask->filters);
1772 
1773 	refcount_set(&newmask->refcnt, 1);
1774 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1775 				      &newmask->ht_node, mask_ht_params);
1776 	if (err)
1777 		goto errout_destroy;
1778 
1779 	spin_lock(&head->masks_lock);
1780 	list_add_tail_rcu(&newmask->list, &head->masks);
1781 	spin_unlock(&head->masks_lock);
1782 
1783 	return newmask;
1784 
1785 errout_destroy:
1786 	rhashtable_destroy(&newmask->ht);
1787 errout_free:
1788 	kfree(newmask);
1789 
1790 	return ERR_PTR(err);
1791 }
1792 
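/* Find an existing mask identical to @mask and take a reference on it, or
 * create a new one via fl_create_new_mask(). When overwriting an existing
 * filter (@fold != NULL) the mask is not allowed to change, so both the
 * "mask not found" and "different mask found" cases fail with -EINVAL.
 */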
1793 static int fl_check_assign_mask(struct cls_fl_head *head,
1794 				struct cls_fl_filter *fnew,
1795 				struct cls_fl_filter *fold,
1796 				struct fl_flow_mask *mask)
1797 {
1798 	struct fl_flow_mask *newmask;
1799 	int ret = 0;
1800 
1801 	rcu_read_lock();
1802 
1803 	/* Insert mask as temporary node to prevent concurrent creation of mask
1804 	 * with same key. Any concurrent lookups with same key will return
1805 	 * -EAGAIN because mask's refcnt is zero.
1806 	 */
1807 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1808 						       &mask->ht_node,
1809 						       mask_ht_params);
1810 	if (!fnew->mask) {
1811 		rcu_read_unlock();
1812 
1813 		if (fold) {
1814 			ret = -EINVAL;
1815 			goto errout_cleanup;
1816 		}
1817 
1818 		newmask = fl_create_new_mask(head, mask);
1819 		if (IS_ERR(newmask)) {
1820 			ret = PTR_ERR(newmask);
1821 			goto errout_cleanup;
1822 		}
1823 
1824 		fnew->mask = newmask;
1825 		return 0;
1826 	} else if (IS_ERR(fnew->mask)) {
1827 		ret = PTR_ERR(fnew->mask);
1828 	} else if (fold && fold->mask != fnew->mask) {
1829 		ret = -EINVAL;
1830 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1831 		/* Mask was deleted concurrently, try again */
1832 		ret = -EAGAIN;
1833 	}
1834 	rcu_read_unlock();
1835 	return ret;
1836 
1837 errout_cleanup:
1838 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1839 			       mask_ht_params);
1840 	return ret;
1841 }
1842 
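/* Parse actions and match keys from netlink attributes into the new filter:
 * validate the extended actions, bind an optional classid, fill key/mask via
 * fl_set_key(), compute the masked key and verify that the mask fits the
 * chain template, if one exists.
 */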
1843 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1844 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1845 			unsigned long base, struct nlattr **tb,
1846 			struct nlattr *est, bool ovr,
1847 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1848 			struct netlink_ext_ack *extack)
1849 {
1850 	int err;
1851 
1852 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1853 				extack);
1854 	if (err < 0)
1855 		return err;
1856 
1857 	if (tb[TCA_FLOWER_CLASSID]) {
1858 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1859 		if (!rtnl_held)
1860 			rtnl_lock();
1861 		tcf_bind_filter(tp, &f->res, base);
1862 		if (!rtnl_held)
1863 			rtnl_unlock();
1864 	}
1865 
1866 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1867 	if (err)
1868 		return err;
1869 
1870 	fl_mask_update_range(mask);
1871 	fl_set_masked_key(&f->mkey, &f->key, mask);
1872 
1873 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1874 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1875 		return -EINVAL;
1876 	}
1877 
1878 	return 0;
1879 }
1880 
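/* Insert the new filter into its mask's hashtable. A duplicate masked key is
 * tolerated only when overwriting an existing filter (@fold); in that case
 * -EEXIST is swallowed, *in_ht stays false and fl_change() inserts the filter
 * later under tp->lock.
 */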
1881 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1882 			       struct cls_fl_filter *fold,
1883 			       bool *in_ht)
1884 {
1885 	struct fl_flow_mask *mask = fnew->mask;
1886 	int err;
1887 
1888 	err = rhashtable_lookup_insert_fast(&mask->ht,
1889 					    &fnew->ht_node,
1890 					    mask->filter_ht_params);
1891 	if (err) {
1892 		*in_ht = false;
1893 		/* It is okay if filter with same key exists when
1894 		 * overwriting.
1895 		 */
1896 		return fold && err == -EEXIST ? 0 : err;
1897 	}
1898 
1899 	*in_ht = true;
1900 	return 0;
1901 }
1902 
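/* Create or update a flower filter (RTM_NEWTFILTER). May run without rtnl
 * when the unlocked classifier API is used; tp->lock and reference counters
 * provide the necessary synchronization. For illustration only, a rule taking
 * this path could be installed with something like:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * (example command and interface name, not part of this file).
 */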
1903 static int fl_change(struct net *net, struct sk_buff *in_skb,
1904 		     struct tcf_proto *tp, unsigned long base,
1905 		     u32 handle, struct nlattr **tca,
1906 		     void **arg, bool ovr, bool rtnl_held,
1907 		     struct netlink_ext_ack *extack)
1908 {
1909 	struct cls_fl_head *head = fl_head_dereference(tp);
1910 	struct cls_fl_filter *fold = *arg;
1911 	struct cls_fl_filter *fnew;
1912 	struct fl_flow_mask *mask;
1913 	struct nlattr **tb;
1914 	bool in_ht;
1915 	int err;
1916 
1917 	if (!tca[TCA_OPTIONS]) {
1918 		err = -EINVAL;
1919 		goto errout_fold;
1920 	}
1921 
1922 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1923 	if (!mask) {
1924 		err = -ENOBUFS;
1925 		goto errout_fold;
1926 	}
1927 
1928 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1929 	if (!tb) {
1930 		err = -ENOBUFS;
1931 		goto errout_mask_alloc;
1932 	}
1933 
1934 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1935 					  tca[TCA_OPTIONS], fl_policy, NULL);
1936 	if (err < 0)
1937 		goto errout_tb;
1938 
1939 	if (fold && handle && fold->handle != handle) {
1940 		err = -EINVAL;
1941 		goto errout_tb;
1942 	}
1943 
1944 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1945 	if (!fnew) {
1946 		err = -ENOBUFS;
1947 		goto errout_tb;
1948 	}
1949 	INIT_LIST_HEAD(&fnew->hw_list);
1950 	refcount_set(&fnew->refcnt, 1);
1951 
1952 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1953 	if (err < 0)
1954 		goto errout;
1955 
1956 	if (tb[TCA_FLOWER_FLAGS]) {
1957 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1958 
1959 		if (!tc_flags_valid(fnew->flags)) {
1960 			err = -EINVAL;
1961 			goto errout;
1962 		}
1963 	}
1964 
1965 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1966 			   tp->chain->tmplt_priv, rtnl_held, extack);
1967 	if (err)
1968 		goto errout;
1969 
1970 	err = fl_check_assign_mask(head, fnew, fold, mask);
1971 	if (err)
1972 		goto errout;
1973 
1974 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1975 	if (err)
1976 		goto errout_mask;
1977 
1978 	if (!tc_skip_hw(fnew->flags)) {
1979 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1980 		if (err)
1981 			goto errout_ht;
1982 	}
1983 
1984 	if (!tc_in_hw(fnew->flags))
1985 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1986 
1987 	spin_lock(&tp->lock);
1988 
1989 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1990 	 * proto again or create new one, if necessary.
1991 	 */
1992 	if (tp->deleting) {
1993 		err = -EAGAIN;
1994 		goto errout_hw;
1995 	}
1996 
1997 	if (fold) {
1998 		/* Fold filter was deleted concurrently. Retry lookup. */
1999 		if (fold->deleted) {
2000 			err = -EAGAIN;
2001 			goto errout_hw;
2002 		}
2003 
2004 		fnew->handle = handle;
2005 
2006 		if (!in_ht) {
2007 			struct rhashtable_params params =
2008 				fnew->mask->filter_ht_params;
2009 
2010 			err = rhashtable_insert_fast(&fnew->mask->ht,
2011 						     &fnew->ht_node,
2012 						     params);
2013 			if (err)
2014 				goto errout_hw;
2015 			in_ht = true;
2016 		}
2017 
2018 		refcount_inc(&fnew->refcnt);
2019 		rhashtable_remove_fast(&fold->mask->ht,
2020 				       &fold->ht_node,
2021 				       fold->mask->filter_ht_params);
2022 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2023 		list_replace_rcu(&fold->list, &fnew->list);
2024 		fold->deleted = true;
2025 
2026 		spin_unlock(&tp->lock);
2027 
2028 		fl_mask_put(head, fold->mask);
2029 		if (!tc_skip_hw(fold->flags))
2030 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2031 		tcf_unbind_filter(tp, &fold->res);
2032 		/* Caller holds reference to fold, so refcnt is always > 0
2033 		 * after this.
2034 		 */
2035 		refcount_dec(&fold->refcnt);
2036 		__fl_put(fold);
2037 	} else {
2038 		if (handle) {
2039 			/* user specifies a handle and it doesn't exist */
2040 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2041 					    handle, GFP_ATOMIC);
2042 
2043 			/* Filter with specified handle was concurrently
2044 			 * inserted after initial check in cls_api. This is not
2045 			 * necessarily an error if NLM_F_EXCL is not set in
2046 			 * message flags. Returning EAGAIN will cause cls_api to
2047 			 * try to update concurrently inserted rule.
2048 			 */
2049 			if (err == -ENOSPC)
2050 				err = -EAGAIN;
2051 		} else {
2052 			handle = 1;
2053 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2054 					    INT_MAX, GFP_ATOMIC);
2055 		}
2056 		if (err)
2057 			goto errout_hw;
2058 
2059 		refcount_inc(&fnew->refcnt);
2060 		fnew->handle = handle;
2061 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2062 		spin_unlock(&tp->lock);
2063 	}
2064 
2065 	*arg = fnew;
2066 
2067 	kfree(tb);
2068 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2069 	return 0;
2070 
2071 errout_ht:
2072 	spin_lock(&tp->lock);
2073 errout_hw:
2074 	fnew->deleted = true;
2075 	spin_unlock(&tp->lock);
2076 	if (!tc_skip_hw(fnew->flags))
2077 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2078 	if (in_ht)
2079 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2080 				       fnew->mask->filter_ht_params);
2081 errout_mask:
2082 	fl_mask_put(head, fnew->mask);
2083 errout:
2084 	__fl_put(fnew);
2085 errout_tb:
2086 	kfree(tb);
2087 errout_mask_alloc:
2088 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2089 errout_fold:
2090 	if (fold)
2091 		__fl_put(fold);
2092 	return err;
2093 }
2094 
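/* Delete a single filter. *last tells the caller whether the classifier is
 * now empty (no masks left), so cls_api can decide to remove the tcf_proto.
 */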
2095 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2096 		     bool rtnl_held, struct netlink_ext_ack *extack)
2097 {
2098 	struct cls_fl_head *head = fl_head_dereference(tp);
2099 	struct cls_fl_filter *f = arg;
2100 	bool last_on_mask;
2101 	int err = 0;
2102 
2103 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2104 	*last = list_empty(&head->masks);
2105 	__fl_put(f);
2106 
2107 	return err;
2108 }
2109 
2110 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2111 		    bool rtnl_held)
2112 {
2113 	struct cls_fl_head *head = fl_head_dereference(tp);
2114 	unsigned long id = arg->cookie, tmp;
2115 	struct cls_fl_filter *f;
2116 
2117 	arg->count = arg->skip;
2118 
2119 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2120 		/* don't return filters that are being deleted */
2121 		if (!refcount_inc_not_zero(&f->refcnt))
2122 			continue;
2123 		if (arg->fn(tp, f, arg) < 0) {
2124 			__fl_put(f);
2125 			arg->stop = 1;
2126 			break;
2127 		}
2128 		__fl_put(f);
2129 		arg->count++;
2130 	}
2131 	arg->cookie = id;
2132 }
2133 
2134 static struct cls_fl_filter *
2135 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2136 {
2137 	struct cls_fl_head *head = fl_head_dereference(tp);
2138 
2139 	spin_lock(&tp->lock);
2140 	if (list_empty(&head->hw_filters)) {
2141 		spin_unlock(&tp->lock);
2142 		return NULL;
2143 	}
2144 
2145 	if (!f)
2146 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2147 			       hw_list);
2148 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2149 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2150 			spin_unlock(&tp->lock);
2151 			return f;
2152 		}
2153 	}
2154 
2155 	spin_unlock(&tp->lock);
2156 	return NULL;
2157 }
2158 
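/* Replay all filters that are (or should be) in hardware towards a block
 * callback. Walks head->hw_filters under RTNL, rebuilding the flow_rule for
 * each filter and issuing FLOW_CLS_REPLACE or FLOW_CLS_DESTROY depending on
 * @add.
 */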
2159 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2160 			void *cb_priv, struct netlink_ext_ack *extack)
2161 {
2162 	struct tcf_block *block = tp->chain->block;
2163 	struct flow_cls_offload cls_flower = {};
2164 	struct cls_fl_filter *f = NULL;
2165 	int err;
2166 
2167 	/* hw_filters list can only be changed by hw offload functions after
2168 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2169 	 * iterating it.
2170 	 */
2171 	ASSERT_RTNL();
2172 
2173 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2174 		cls_flower.rule =
2175 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2176 		if (!cls_flower.rule) {
2177 			__fl_put(f);
2178 			return -ENOMEM;
2179 		}
2180 
2181 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2182 					   extack);
2183 		cls_flower.command = add ?
2184 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2185 		cls_flower.cookie = (unsigned long)f;
2186 		cls_flower.rule->match.dissector = &f->mask->dissector;
2187 		cls_flower.rule->match.mask = &f->mask->key;
2188 		cls_flower.rule->match.key = &f->mkey;
2189 
2190 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2191 		if (err) {
2192 			kfree(cls_flower.rule);
2193 			if (tc_skip_sw(f->flags)) {
2194 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2195 				__fl_put(f);
2196 				return err;
2197 			}
2198 			goto next_flow;
2199 		}
2200 
2201 		cls_flower.classid = f->res.classid;
2202 
2203 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2204 					    TC_SETUP_CLSFLOWER, &cls_flower,
2205 					    cb_priv, &f->flags,
2206 					    &f->in_hw_count);
2207 		tc_cleanup_flow_action(&cls_flower.rule->action);
2208 		kfree(cls_flower.rule);
2209 
2210 		if (err) {
2211 			__fl_put(f);
2212 			return err;
2213 		}
2214 next_flow:
2215 		__fl_put(f);
2216 	}
2217 
2218 	return 0;
2219 }
2220 
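/* Callbacks invoked by the offload core when a filter is accepted by or
 * removed from hardware; they keep head->hw_filters (used by fl_reoffload())
 * consistent under tp->lock.
 */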
2221 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2222 {
2223 	struct flow_cls_offload *cls_flower = type_data;
2224 	struct cls_fl_filter *f =
2225 		(struct cls_fl_filter *) cls_flower->cookie;
2226 	struct cls_fl_head *head = fl_head_dereference(tp);
2227 
2228 	spin_lock(&tp->lock);
2229 	list_add(&f->hw_list, &head->hw_filters);
2230 	spin_unlock(&tp->lock);
2231 }
2232 
2233 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2234 {
2235 	struct flow_cls_offload *cls_flower = type_data;
2236 	struct cls_fl_filter *f =
2237 		(struct cls_fl_filter *) cls_flower->cookie;
2238 
2239 	spin_lock(&tp->lock);
2240 	if (!list_empty(&f->hw_list))
2241 		list_del_init(&f->hw_list);
2242 	spin_unlock(&tp->lock);
2243 }
2244 
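/* Offer the chain template to offload-capable drivers. Failures are ignored
 * on purpose: the template is only a hint that lets hardware pre-allocate
 * resources for the mask it describes.
 */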
2245 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2246 			      struct fl_flow_tmplt *tmplt)
2247 {
2248 	struct flow_cls_offload cls_flower = {};
2249 	struct tcf_block *block = chain->block;
2250 
2251 	cls_flower.rule = flow_rule_alloc(0);
2252 	if (!cls_flower.rule)
2253 		return -ENOMEM;
2254 
2255 	cls_flower.common.chain_index = chain->index;
2256 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2257 	cls_flower.cookie = (unsigned long) tmplt;
2258 	cls_flower.rule->match.dissector = &tmplt->dissector;
2259 	cls_flower.rule->match.mask = &tmplt->mask;
2260 	cls_flower.rule->match.key = &tmplt->dummy_key;
2261 
2262 	/* We don't care if any of the drivers fails to handle this call;
2263 	 * it serves only as a hint for them.
2264 	 */
2265 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2266 	kfree(cls_flower.rule);
2267 
2268 	return 0;
2269 }
2270 
2271 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2272 				struct fl_flow_tmplt *tmplt)
2273 {
2274 	struct flow_cls_offload cls_flower = {};
2275 	struct tcf_block *block = chain->block;
2276 
2277 	cls_flower.common.chain_index = chain->index;
2278 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2279 	cls_flower.cookie = (unsigned long) tmplt;
2280 
2281 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2282 }
2283 
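/* Parse a chain template. The regular fl_set_key() parser is reused, but only
 * the resulting mask (plus a dummy key) is kept; filters later added to the
 * chain must use masks that fit the template (fl_mask_fits_tmplt() in
 * fl_set_parms()).
 */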
2284 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2285 			     struct nlattr **tca,
2286 			     struct netlink_ext_ack *extack)
2287 {
2288 	struct fl_flow_tmplt *tmplt;
2289 	struct nlattr **tb;
2290 	int err;
2291 
2292 	if (!tca[TCA_OPTIONS])
2293 		return ERR_PTR(-EINVAL);
2294 
2295 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2296 	if (!tb)
2297 		return ERR_PTR(-ENOBUFS);
2298 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2299 					  tca[TCA_OPTIONS], fl_policy, NULL);
2300 	if (err)
2301 		goto errout_tb;
2302 
2303 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2304 	if (!tmplt) {
2305 		err = -ENOMEM;
2306 		goto errout_tb;
2307 	}
2308 	tmplt->chain = chain;
2309 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2310 	if (err)
2311 		goto errout_tmplt;
2312 
2313 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2314 
2315 	err = fl_hw_create_tmplt(chain, tmplt);
2316 	if (err)
2317 		goto errout_tmplt;
2318 
2319 	kfree(tb);
2320 	return tmplt;
2321 
2322 errout_tmplt:
2323 	kfree(tmplt);
2324 errout_tb:
2325 	kfree(tb);
2326 	return ERR_PTR(err);
2327 }
2328 
2329 static void fl_tmplt_destroy(void *tmplt_priv)
2330 {
2331 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2332 
2333 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2334 	kfree(tmplt);
2335 }
2336 
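/* Dump helpers: each fl_dump_key_*() helper emits a key/mask attribute pair
 * only when the corresponding mask is non-zero, so netlink dumps mirror
 * exactly what was configured.
 */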
2337 static int fl_dump_key_val(struct sk_buff *skb,
2338 			   void *val, int val_type,
2339 			   void *mask, int mask_type, int len)
2340 {
2341 	int err;
2342 
2343 	if (!memchr_inv(mask, 0, len))
2344 		return 0;
2345 	err = nla_put(skb, val_type, len, val);
2346 	if (err)
2347 		return err;
2348 	if (mask_type != TCA_FLOWER_UNSPEC) {
2349 		err = nla_put(skb, mask_type, len, mask);
2350 		if (err)
2351 			return err;
2352 	}
2353 	return 0;
2354 }
2355 
2356 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2357 				  struct fl_flow_key *mask)
2358 {
2359 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2360 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2361 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2362 			    sizeof(key->tp_range.tp_min.dst)) ||
2363 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2364 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2365 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2366 			    sizeof(key->tp_range.tp_max.dst)) ||
2367 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2368 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2369 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2370 			    sizeof(key->tp_range.tp_min.src)) ||
2371 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2372 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2373 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2374 			    sizeof(key->tp_range.tp_max.src)))
2375 		return -1;
2376 
2377 	return 0;
2378 }
2379 
2380 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2381 				    struct flow_dissector_key_mpls *mpls_key,
2382 				    struct flow_dissector_key_mpls *mpls_mask,
2383 				    u8 lse_index)
2384 {
2385 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2386 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2387 	int err;
2388 
2389 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2390 			 lse_index + 1);
2391 	if (err)
2392 		return err;
2393 
2394 	if (lse_mask->mpls_ttl) {
2395 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2396 				 lse_key->mpls_ttl);
2397 		if (err)
2398 			return err;
2399 	}
2400 	if (lse_mask->mpls_bos) {
2401 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2402 				 lse_key->mpls_bos);
2403 		if (err)
2404 			return err;
2405 	}
2406 	if (lse_mask->mpls_tc) {
2407 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2408 				 lse_key->mpls_tc);
2409 		if (err)
2410 			return err;
2411 	}
2412 	if (lse_mask->mpls_label) {
2413 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2414 				  lse_key->mpls_label);
2415 		if (err)
2416 			return err;
2417 	}
2418 
2419 	return 0;
2420 }
2421 
2422 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2423 				 struct flow_dissector_key_mpls *mpls_key,
2424 				 struct flow_dissector_key_mpls *mpls_mask)
2425 {
2426 	struct nlattr *opts;
2427 	struct nlattr *lse;
2428 	u8 lse_index;
2429 	int err;
2430 
2431 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2432 	if (!opts)
2433 		return -EMSGSIZE;
2434 
2435 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2436 		if (!(mpls_mask->used_lses & 1 << lse_index))
2437 			continue;
2438 
2439 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2440 		if (!lse) {
2441 			err = -EMSGSIZE;
2442 			goto err_opts;
2443 		}
2444 
2445 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2446 					       lse_index);
2447 		if (err)
2448 			goto err_opts_lse;
2449 		nla_nest_end(skb, lse);
2450 	}
2451 	nla_nest_end(skb, opts);
2452 
2453 	return 0;
2454 
2455 err_opts_lse:
2456 	nla_nest_cancel(skb, lse);
2457 err_opts:
2458 	nla_nest_cancel(skb, opts);
2459 
2460 	return err;
2461 }
2462 
2463 static int fl_dump_key_mpls(struct sk_buff *skb,
2464 			    struct flow_dissector_key_mpls *mpls_key,
2465 			    struct flow_dissector_key_mpls *mpls_mask)
2466 {
2467 	struct flow_dissector_mpls_lse *lse_mask;
2468 	struct flow_dissector_mpls_lse *lse_key;
2469 	int err;
2470 
2471 	if (!mpls_mask->used_lses)
2472 		return 0;
2473 
2474 	lse_mask = &mpls_mask->ls[0];
2475 	lse_key = &mpls_key->ls[0];
2476 
2477 	/* For backward compatibility, don't use the MPLS nested attributes if
2478 	 * the rule can be expressed using the old attributes.
2479 	 */
2480 	if (mpls_mask->used_lses & ~1 ||
2481 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2482 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2483 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2484 
2485 	if (lse_mask->mpls_ttl) {
2486 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2487 				 lse_key->mpls_ttl);
2488 		if (err)
2489 			return err;
2490 	}
2491 	if (lse_mask->mpls_tc) {
2492 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2493 				 lse_key->mpls_tc);
2494 		if (err)
2495 			return err;
2496 	}
2497 	if (lse_mask->mpls_label) {
2498 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2499 				  lse_key->mpls_label);
2500 		if (err)
2501 			return err;
2502 	}
2503 	if (lse_mask->mpls_bos) {
2504 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2505 				 lse_key->mpls_bos);
2506 		if (err)
2507 			return err;
2508 	}
2509 	return 0;
2510 }
2511 
2512 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2513 			  struct flow_dissector_key_ip *key,
2514 			  struct flow_dissector_key_ip *mask)
2515 {
2516 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2517 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2518 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2519 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2520 
2521 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2522 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2523 		return -1;
2524 
2525 	return 0;
2526 }
2527 
2528 static int fl_dump_key_vlan(struct sk_buff *skb,
2529 			    int vlan_id_key, int vlan_prio_key,
2530 			    struct flow_dissector_key_vlan *vlan_key,
2531 			    struct flow_dissector_key_vlan *vlan_mask)
2532 {
2533 	int err;
2534 
2535 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2536 		return 0;
2537 	if (vlan_mask->vlan_id) {
2538 		err = nla_put_u16(skb, vlan_id_key,
2539 				  vlan_key->vlan_id);
2540 		if (err)
2541 			return err;
2542 	}
2543 	if (vlan_mask->vlan_priority) {
2544 		err = nla_put_u8(skb, vlan_prio_key,
2545 				 vlan_key->vlan_priority);
2546 		if (err)
2547 			return err;
2548 	}
2549 	return 0;
2550 }
2551 
2552 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2553 			    u32 *flower_key, u32 *flower_mask,
2554 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2555 {
2556 	if (dissector_mask & dissector_flag_bit) {
2557 		*flower_mask |= flower_flag_bit;
2558 		if (dissector_key & dissector_flag_bit)
2559 			*flower_key |= flower_flag_bit;
2560 	}
2561 }
2562 
2563 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2564 {
2565 	u32 key, mask;
2566 	__be32 _key, _mask;
2567 	int err;
2568 
2569 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2570 		return 0;
2571 
2572 	key = 0;
2573 	mask = 0;
2574 
2575 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2576 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2577 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2578 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2579 			FLOW_DIS_FIRST_FRAG);
2580 
2581 	_key = cpu_to_be32(key);
2582 	_mask = cpu_to_be32(mask);
2583 
2584 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2585 	if (err)
2586 		return err;
2587 
2588 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2589 }
2590 
2591 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2592 				  struct flow_dissector_key_enc_opts *enc_opts)
2593 {
2594 	struct geneve_opt *opt;
2595 	struct nlattr *nest;
2596 	int opt_off = 0;
2597 
2598 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2599 	if (!nest)
2600 		goto nla_put_failure;
2601 
2602 	while (enc_opts->len > opt_off) {
2603 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2604 
2605 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2606 				 opt->opt_class))
2607 			goto nla_put_failure;
2608 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2609 			       opt->type))
2610 			goto nla_put_failure;
2611 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2612 			    opt->length * 4, opt->opt_data))
2613 			goto nla_put_failure;
2614 
2615 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2616 	}
2617 	nla_nest_end(skb, nest);
2618 	return 0;
2619 
2620 nla_put_failure:
2621 	nla_nest_cancel(skb, nest);
2622 	return -EMSGSIZE;
2623 }
2624 
2625 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2626 				 struct flow_dissector_key_enc_opts *enc_opts)
2627 {
2628 	struct vxlan_metadata *md;
2629 	struct nlattr *nest;
2630 
2631 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2632 	if (!nest)
2633 		goto nla_put_failure;
2634 
2635 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2636 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2637 		goto nla_put_failure;
2638 
2639 	nla_nest_end(skb, nest);
2640 	return 0;
2641 
2642 nla_put_failure:
2643 	nla_nest_cancel(skb, nest);
2644 	return -EMSGSIZE;
2645 }
2646 
2647 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2648 				  struct flow_dissector_key_enc_opts *enc_opts)
2649 {
2650 	struct erspan_metadata *md;
2651 	struct nlattr *nest;
2652 
2653 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2654 	if (!nest)
2655 		goto nla_put_failure;
2656 
2657 	md = (struct erspan_metadata *)&enc_opts->data[0];
2658 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2659 		goto nla_put_failure;
2660 
2661 	if (md->version == 1 &&
2662 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2663 		goto nla_put_failure;
2664 
2665 	if (md->version == 2 &&
2666 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2667 			md->u.md2.dir) ||
2668 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2669 			get_hwid(&md->u.md2))))
2670 		goto nla_put_failure;
2671 
2672 	nla_nest_end(skb, nest);
2673 	return 0;
2674 
2675 nla_put_failure:
2676 	nla_nest_cancel(skb, nest);
2677 	return -EMSGSIZE;
2678 }
2679 
2680 static int fl_dump_key_ct(struct sk_buff *skb,
2681 			  struct flow_dissector_key_ct *key,
2682 			  struct flow_dissector_key_ct *mask)
2683 {
2684 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2685 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2686 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2687 			    sizeof(key->ct_state)))
2688 		goto nla_put_failure;
2689 
2690 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2691 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2692 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2693 			    sizeof(key->ct_zone)))
2694 		goto nla_put_failure;
2695 
2696 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2697 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2698 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2699 			    sizeof(key->ct_mark)))
2700 		goto nla_put_failure;
2701 
2702 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2703 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2704 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2705 			    sizeof(key->ct_labels)))
2706 		goto nla_put_failure;
2707 
2708 	return 0;
2709 
2710 nla_put_failure:
2711 	return -EMSGSIZE;
2712 }
2713 
2714 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2715 			       struct flow_dissector_key_enc_opts *enc_opts)
2716 {
2717 	struct nlattr *nest;
2718 	int err;
2719 
2720 	if (!enc_opts->len)
2721 		return 0;
2722 
2723 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2724 	if (!nest)
2725 		goto nla_put_failure;
2726 
2727 	switch (enc_opts->dst_opt_type) {
2728 	case TUNNEL_GENEVE_OPT:
2729 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2730 		if (err)
2731 			goto nla_put_failure;
2732 		break;
2733 	case TUNNEL_VXLAN_OPT:
2734 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2735 		if (err)
2736 			goto nla_put_failure;
2737 		break;
2738 	case TUNNEL_ERSPAN_OPT:
2739 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2740 		if (err)
2741 			goto nla_put_failure;
2742 		break;
2743 	default:
2744 		goto nla_put_failure;
2745 	}
2746 	nla_nest_end(skb, nest);
2747 	return 0;
2748 
2749 nla_put_failure:
2750 	nla_nest_cancel(skb, nest);
2751 	return -EMSGSIZE;
2752 }
2753 
2754 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2755 			       struct flow_dissector_key_enc_opts *key_opts,
2756 			       struct flow_dissector_key_enc_opts *msk_opts)
2757 {
2758 	int err;
2759 
2760 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2761 	if (err)
2762 		return err;
2763 
2764 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2765 }
2766 
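/* Dump the full key/mask of a filter (or template) back to user space, using
 * the same TCA_FLOWER_* attribute layout that fl_set_key() accepts.
 */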
2767 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2768 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2769 {
2770 	if (mask->meta.ingress_ifindex) {
2771 		struct net_device *dev;
2772 
2773 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2774 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2775 			goto nla_put_failure;
2776 	}
2777 
2778 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2779 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2780 			    sizeof(key->eth.dst)) ||
2781 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2782 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2783 			    sizeof(key->eth.src)) ||
2784 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2785 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2786 			    sizeof(key->basic.n_proto)))
2787 		goto nla_put_failure;
2788 
2789 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2790 		goto nla_put_failure;
2791 
2792 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2793 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2794 		goto nla_put_failure;
2795 
2796 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2797 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2798 			     &key->cvlan, &mask->cvlan) ||
2799 	    (mask->cvlan.vlan_tpid &&
2800 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2801 			  key->cvlan.vlan_tpid)))
2802 		goto nla_put_failure;
2803 
2804 	if (mask->basic.n_proto) {
2805 		if (mask->cvlan.vlan_tpid) {
2806 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2807 					 key->basic.n_proto))
2808 				goto nla_put_failure;
2809 		} else if (mask->vlan.vlan_tpid) {
2810 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2811 					 key->basic.n_proto))
2812 				goto nla_put_failure;
2813 		}
2814 	}
2815 
2816 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2817 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2818 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2819 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2820 			    sizeof(key->basic.ip_proto)) ||
2821 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2822 		goto nla_put_failure;
2823 
2824 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2825 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2826 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2827 			     sizeof(key->ipv4.src)) ||
2828 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2829 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2830 			     sizeof(key->ipv4.dst))))
2831 		goto nla_put_failure;
2832 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2833 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2834 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2835 				  sizeof(key->ipv6.src)) ||
2836 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2837 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2838 				  sizeof(key->ipv6.dst))))
2839 		goto nla_put_failure;
2840 
2841 	if (key->basic.ip_proto == IPPROTO_TCP &&
2842 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2843 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2844 			     sizeof(key->tp.src)) ||
2845 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2846 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2847 			     sizeof(key->tp.dst)) ||
2848 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2849 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2850 			     sizeof(key->tcp.flags))))
2851 		goto nla_put_failure;
2852 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2853 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2854 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2855 				  sizeof(key->tp.src)) ||
2856 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2857 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2858 				  sizeof(key->tp.dst))))
2859 		goto nla_put_failure;
2860 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2861 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2862 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2863 				  sizeof(key->tp.src)) ||
2864 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2865 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2866 				  sizeof(key->tp.dst))))
2867 		goto nla_put_failure;
2868 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2869 		 key->basic.ip_proto == IPPROTO_ICMP &&
2870 		 (fl_dump_key_val(skb, &key->icmp.type,
2871 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2872 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2873 				  sizeof(key->icmp.type)) ||
2874 		  fl_dump_key_val(skb, &key->icmp.code,
2875 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2876 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2877 				  sizeof(key->icmp.code))))
2878 		goto nla_put_failure;
2879 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2880 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2881 		 (fl_dump_key_val(skb, &key->icmp.type,
2882 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2883 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2884 				  sizeof(key->icmp.type)) ||
2885 		  fl_dump_key_val(skb, &key->icmp.code,
2886 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2887 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2888 				  sizeof(key->icmp.code))))
2889 		goto nla_put_failure;
2890 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2891 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2892 		 (fl_dump_key_val(skb, &key->arp.sip,
2893 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2894 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2895 				  sizeof(key->arp.sip)) ||
2896 		  fl_dump_key_val(skb, &key->arp.tip,
2897 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2898 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2899 				  sizeof(key->arp.tip)) ||
2900 		  fl_dump_key_val(skb, &key->arp.op,
2901 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2902 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2903 				  sizeof(key->arp.op)) ||
2904 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2905 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2906 				  sizeof(key->arp.sha)) ||
2907 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2908 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2909 				  sizeof(key->arp.tha))))
2910 		goto nla_put_failure;
2911 
2912 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2913 	     key->basic.ip_proto == IPPROTO_UDP ||
2914 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2915 	     fl_dump_key_port_range(skb, key, mask))
2916 		goto nla_put_failure;
2917 
2918 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2919 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2920 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2921 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2922 			    sizeof(key->enc_ipv4.src)) ||
2923 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2924 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2925 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2926 			     sizeof(key->enc_ipv4.dst))))
2927 		goto nla_put_failure;
2928 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2929 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2930 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2931 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2932 			    sizeof(key->enc_ipv6.src)) ||
2933 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2934 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2935 				 &mask->enc_ipv6.dst,
2936 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2937 			    sizeof(key->enc_ipv6.dst))))
2938 		goto nla_put_failure;
2939 
2940 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2941 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2942 			    sizeof(key->enc_key_id)) ||
2943 	    fl_dump_key_val(skb, &key->enc_tp.src,
2944 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2945 			    &mask->enc_tp.src,
2946 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2947 			    sizeof(key->enc_tp.src)) ||
2948 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2949 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2950 			    &mask->enc_tp.dst,
2951 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2952 			    sizeof(key->enc_tp.dst)) ||
2953 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2954 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2955 		goto nla_put_failure;
2956 
2957 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2958 		goto nla_put_failure;
2959 
2960 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2961 		goto nla_put_failure;
2962 
2963 	return 0;
2964 
2965 nla_put_failure:
2966 	return -EMSGSIZE;
2967 }
2968 
2969 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2970 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2971 {
2972 	struct cls_fl_filter *f = fh;
2973 	struct nlattr *nest;
2974 	struct fl_flow_key *key, *mask;
2975 	bool skip_hw;
2976 
2977 	if (!f)
2978 		return skb->len;
2979 
2980 	t->tcm_handle = f->handle;
2981 
2982 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2983 	if (!nest)
2984 		goto nla_put_failure;
2985 
2986 	spin_lock(&tp->lock);
2987 
2988 	if (f->res.classid &&
2989 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2990 		goto nla_put_failure_locked;
2991 
2992 	key = &f->key;
2993 	mask = &f->mask->key;
2994 	skip_hw = tc_skip_hw(f->flags);
2995 
2996 	if (fl_dump_key(skb, net, key, mask))
2997 		goto nla_put_failure_locked;
2998 
2999 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3000 		goto nla_put_failure_locked;
3001 
3002 	spin_unlock(&tp->lock);
3003 
3004 	if (!skip_hw)
3005 		fl_hw_update_stats(tp, f, rtnl_held);
3006 
3007 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3008 		goto nla_put_failure;
3009 
3010 	if (tcf_exts_dump(skb, &f->exts))
3011 		goto nla_put_failure;
3012 
3013 	nla_nest_end(skb, nest);
3014 
3015 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3016 		goto nla_put_failure;
3017 
3018 	return skb->len;
3019 
3020 nla_put_failure_locked:
3021 	spin_unlock(&tp->lock);
3022 nla_put_failure:
3023 	nla_nest_cancel(skb, nest);
3024 	return -1;
3025 }
3026 
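/* Terse dump: skip the key/mask entirely and report only the handle, flags
 * and an abbreviated view of the actions.
 */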
3027 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3028 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3029 {
3030 	struct cls_fl_filter *f = fh;
3031 	struct nlattr *nest;
3032 	bool skip_hw;
3033 
3034 	if (!f)
3035 		return skb->len;
3036 
3037 	t->tcm_handle = f->handle;
3038 
3039 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3040 	if (!nest)
3041 		goto nla_put_failure;
3042 
3043 	spin_lock(&tp->lock);
3044 
3045 	skip_hw = tc_skip_hw(f->flags);
3046 
3047 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3048 		goto nla_put_failure_locked;
3049 
3050 	spin_unlock(&tp->lock);
3051 
3052 	if (!skip_hw)
3053 		fl_hw_update_stats(tp, f, rtnl_held);
3054 
3055 	if (tcf_exts_terse_dump(skb, &f->exts))
3056 		goto nla_put_failure;
3057 
3058 	nla_nest_end(skb, nest);
3059 
3060 	return skb->len;
3061 
3062 nla_put_failure_locked:
3063 	spin_unlock(&tp->lock);
3064 nla_put_failure:
3065 	nla_nest_cancel(skb, nest);
3066 	return -1;
3067 }
3068 
3069 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3070 {
3071 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3072 	struct fl_flow_key *key, *mask;
3073 	struct nlattr *nest;
3074 
3075 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3076 	if (!nest)
3077 		goto nla_put_failure;
3078 
3079 	key = &tmplt->dummy_key;
3080 	mask = &tmplt->mask;
3081 
3082 	if (fl_dump_key(skb, net, key, mask))
3083 		goto nla_put_failure;
3084 
3085 	nla_nest_end(skb, nest);
3086 
3087 	return skb->len;
3088 
3089 nla_put_failure:
3090 	nla_nest_cancel(skb, nest);
3091 	return -EMSGSIZE;
3092 }
3093 
3094 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3095 			  unsigned long base)
3096 {
3097 	struct cls_fl_filter *f = fh;
3098 
3099 	if (f && f->res.classid == classid) {
3100 		if (cl)
3101 			__tcf_bind_filter(q, &f->res, base);
3102 		else
3103 			__tcf_unbind_filter(q, &f->res);
3104 	}
3105 }
3106 
3107 static bool fl_delete_empty(struct tcf_proto *tp)
3108 {
3109 	struct cls_fl_head *head = fl_head_dereference(tp);
3110 
3111 	spin_lock(&tp->lock);
3112 	tp->deleting = idr_is_empty(&head->handle_idr);
3113 	spin_unlock(&tp->lock);
3114 
3115 	return tp->deleting;
3116 }
3117 
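/* All callbacks are registered rtnl-unlocked (TCF_PROTO_OPS_DOIT_UNLOCKED);
 * the rtnl_held argument tells each handler whether the caller nevertheless
 * holds the lock.
 */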
3118 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3119 	.kind		= "flower",
3120 	.classify	= fl_classify,
3121 	.init		= fl_init,
3122 	.destroy	= fl_destroy,
3123 	.get		= fl_get,
3124 	.put		= fl_put,
3125 	.change		= fl_change,
3126 	.delete		= fl_delete,
3127 	.delete_empty	= fl_delete_empty,
3128 	.walk		= fl_walk,
3129 	.reoffload	= fl_reoffload,
3130 	.hw_add		= fl_hw_add,
3131 	.hw_del		= fl_hw_del,
3132 	.dump		= fl_dump,
3133 	.terse_dump	= fl_terse_dump,
3134 	.bind_class	= fl_bind_class,
3135 	.tmplt_create	= fl_tmplt_create,
3136 	.tmplt_destroy	= fl_tmplt_destroy,
3137 	.tmplt_dump	= fl_tmplt_dump,
3138 	.owner		= THIS_MODULE,
3139 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3140 };
3141 
3142 static int __init cls_fl_init(void)
3143 {
3144 	return register_tcf_proto_ops(&cls_fl_ops);
3145 }
3146 
3147 static void __exit cls_fl_exit(void)
3148 {
3149 	unregister_tcf_proto_ops(&cls_fl_ops);
3150 }
3151 
3152 module_init(cls_fl_init);
3153 module_exit(cls_fl_exit);
3154 
3155 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3156 MODULE_DESCRIPTION("Flower classifier");
3157 MODULE_LICENSE("GPL v2");
3158