// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports_range tp_range;
	struct flow_dissector_key_ct ct;
	struct flow_dissector_key_hash hash;
	struct flow_dissector_key_num_of_vlans num_of_vlans;
	struct flow_dissector_key_pppoe pppoe;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
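
/* Worked example (an editorial sketch, not part of the original source):
 * with 8-byte longs, a mask whose only non-zero bytes sit at offsets 13
 * and 18 yields first = 13 and last = 18, so the range becomes
 * start = rounddown(13, 8) = 8 and end = roundup(19, 8) = 24. Lookups
 * then only hash and compare bytes [8, 24) of the key, long by long.
 */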

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}
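
/* Editorial note: a mask "fits" a template iff every bit it matches on is
 * also matched by the template, i.e. the mask is a subset of tmplt->mask.
 * For example (hypothetical 16-bit words), tmplt = 0xff00 and mask = 0xff01
 * fail the check because ~0xff00 & 0xff01 = 0x0001 != 0.
 */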

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
	min_val = ntohs(filter->key.tp_range.tp_min.dst);
	max_val = ntohs(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.dst) < min_val ||
		    ntohs(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
	min_val = ntohs(filter->key.tp_range.tp_min.src);
	max_val = ntohs(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp_range.tp.src) < min_val ||
		    ntohs(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}
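
/* Editorial note: port-range filters keep tp_min/tp_max in the filter
 * itself because the skb carries only a single port value. Once the
 * packet's port is confirmed to lie inside [min_val, max_val], the
 * filter's masked min/max are copied into mkey so that the subsequent
 * rhashtable lookup in __fl_lookup() can still match exactly.
 */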

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
	struct fl_flow_key mkey;

	fl_set_masked_key(&mkey, key, mask);
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, &mkey, key);

	return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
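
/* Editorial note: in iproute2 ct_state notation, the table above maps
 * e.g. IP_CT_ESTABLISHED to +trk+est and IP_CT_ESTABLISHED_REPLY to
 * +trk+est+rpl, so a filter such as "ct_state +trk+est" matches both
 * directions of an established, conntrack-tracked flow.
 */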

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	bool post_ct = tc_skb_cb(skb)->post_ct;
	u16 zone = tc_skb_cb(skb)->zone;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
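
/* Illustrative usage (an editorial sketch; the device name is arbitrary):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 action drop
 *
 * installs one filter and hence one mask; fl_classify() above then
 * dissects each ingress skb once per distinct mask and looks the masked
 * key up in that mask's hash table. Filters created with skip_sw are
 * skipped by this software path.
 */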

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
				      cls_flower.common.extack);
	if (err) {
		kfree(cls_flower.rule);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_offload_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
				 cls_flower.stats.pkts,
				 cls_flower.stats.drops,
				 cls_flower.stats.lastused,
				 cls_flower.stats.used_hw_stats,
				 cls_flower.stats.used_hw_stats_valid);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_PPPOE_SID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_PPP_PROTO]	= { .type = NLA_U16 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_GTP]		= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]	   = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]	   = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask,
				 struct netlink_ext_ack *extack)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	    ntohs(key->tp_range.tp_max.dst) <=
	    ntohs(key->tp_range.tp_min.dst)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
				    "Invalid destination port range (min must be strictly smaller than max)");
		return -EINVAL;
	}
	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	    ntohs(key->tp_range.tp_max.src) <=
	    ntohs(key->tp_range.tp_min.src)) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
				    "Invalid source port range (min must be strictly smaller than max)");
		return -EINVAL;
	}

	return 0;
}
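
/* Illustrative usage (editorial sketch): iproute2 expresses a port range
 * as MIN-MAX, e.g.
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_proto tcp dst_port 1000-2000 action drop
 *
 * which populates tp_range.tp_min.dst = 1000 and tp_range.tp_max.dst =
 * 2000; the strict min < max check above rejects e.g. "dst_port 2000-1000".
 */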

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
			       struct flow_dissector_key_mpls *key_val,
			       struct flow_dissector_key_mpls *key_mask,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;
	u8 lse_index;
	u8 depth;
	int err;

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
			       mpls_stack_entry_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
		return -EINVAL;
	}

	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

	/* LSE depth starts at 1, for consistency with terminology used by
	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
	 */
	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
		NL_SET_ERR_MSG_ATTR(extack,
				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
				    "Invalid MPLS depth");
		return -EINVAL;
	}
	lse_index = depth - 1;

	dissector_set_mpls_lse(key_val, lse_index);
	dissector_set_mpls_lse(key_mask, lse_index);

	lse_val = &key_val->ls[lse_index];
	lse_mask = &key_mask->ls[lse_index];

	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
	}

	return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
				struct flow_dissector_key_mpls *key_val,
				struct flow_dissector_key_mpls *key_mask,
				struct netlink_ext_ack *extack)
{
	struct nlattr *nla_lse;
	int rem;
	int err;

	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
				    "NLA_F_NESTED is missing");
		return -EINVAL;
	}

	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
					    "Invalid MPLS option type");
			return -EINVAL;
		}

		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
		if (err < 0)
			return err;
	}
	if (rem) {
		NL_SET_ERR_MSG(extack,
			       "Bytes leftover after parsing MPLS options");
		return -EINVAL;
	}

	return 0;
}
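
/* Illustrative usage (editorial sketch, assuming an iproute2 version with
 * nested MPLS option support): matching the second label-stack entry looks
 * like
 *
 *   tc filter add dev eth0 ingress protocol mpls_uc flower \
 *       mpls lse depth 2 label 100 ttl 64 action drop
 *
 * where each "lse" block is parsed by fl_set_key_mpls_lse() above.
 */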

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask,
			   struct netlink_ext_ack *extack)
{
	struct flow_dissector_mpls_lse *lse_mask;
	struct flow_dissector_mpls_lse *lse_val;

	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
			return -EBADMSG;
		}

		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
					    key_val, key_mask, extack);
	}

	lse_val = &key_val->ls[0];
	lse_mask = &key_mask->ls[0];

	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		lse_mask->mpls_ttl = MPLS_TTL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_BOS],
					    "Bottom Of Stack (BOS) must be 0 or 1");
			return -EINVAL;
		}
		lse_val->mpls_bos = bos;
		lse_mask->mpls_bos = MPLS_BOS_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_TC],
					    "Traffic Class (TC) must be between 0 and 7");
			return -EINVAL;
		}
		lse_val->mpls_tc = tc;
		lse_mask->mpls_tc = MPLS_TC_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK) {
			NL_SET_ERR_MSG_ATTR(extack,
					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
					    "Label must be between 0 and 1048575");
			return -EINVAL;
		}
		lse_val->mpls_label = label;
		lse_mask->mpls_label = MPLS_LABEL_MASK;
		dissector_set_mpls_lse(key_val, 0);
		dissector_set_mpls_lse(key_mask, 0);
	}
	return 0;
}
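
/* Editorial note: the TCA_FLOWER_KEY_MPLS_{TTL,BOS,TC,LABEL} attributes
 * handled above are the older, single-LSE interface (iproute2
 * "mpls_label 100 mpls_tc 3 ..."), which always targets depth 1 (ls[0])
 * and is mutually exclusive with the nested TCA_FLOWER_KEY_MPLS_OPTS.
 */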

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    int vlan_next_eth_type_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	if (ethertype) {
		key_val->vlan_tpid = ethertype;
		key_mask->vlan_tpid = cpu_to_be16(~0);
	}
	if (tb[vlan_next_eth_type_key]) {
		key_val->vlan_eth_type =
			nla_get_be16(tb[vlan_next_eth_type_key]);
		key_mask->vlan_eth_type = cpu_to_be16(~0);
	}
}
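
/* Illustrative usage (editorial sketch): outer and inner 802.1Q tags can
 * be matched together, e.g.
 *
 *   tc filter add dev eth0 ingress protocol 802.1Q flower \
 *       vlan_id 100 vlan_prio 3 vlan_ethtype 802.1Q \
 *       cvlan_id 200 cvlan_ethtype ipv4 action drop
 *
 * which fills both the vlan and cvlan keys via this helper.
 */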

static void fl_set_key_pppoe(struct nlattr **tb,
			     struct flow_dissector_key_pppoe *key_val,
			     struct flow_dissector_key_pppoe *key_mask,
			     struct fl_flow_key *key,
			     struct fl_flow_key *mask)
{
	/* key_val::type must be set to ETH_P_PPP_SES because
	 * ETH_P_PPP_SES was stored in basic.n_proto, which might get
	 * overwritten by ppp_proto or might be set to 0. The role of
	 * key_val::type is similar to vlan_key::tpid.
	 */
	key_val->type = htons(ETH_P_PPP_SES);
	key_mask->type = cpu_to_be16(~0);

	if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
		key_val->session_id =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
		key_mask->session_id = cpu_to_be16(~0);
	}
	if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
		key_val->ppp_proto =
			nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
		key_mask->ppp_proto = cpu_to_be16(~0);

		if (key_val->ppp_proto == htons(PPP_IP)) {
			key->basic.n_proto = htons(ETH_P_IP);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_IPV6)) {
			key->basic.n_proto = htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}
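
/* Illustrative usage (editorial sketch, assuming an iproute2 version with
 * PPPoE support):
 *
 *   tc filter add dev eth0 ingress protocol ppp_ses flower \
 *       pppoe_sid 1234 ppp_proto ip action drop
 *
 * matches PPPoE session 1234 carrying IPv4, with n_proto rewritten to
 * ETH_P_IP as done above so that later IP keys keep working.
 */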

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}
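
/* Illustrative usage (editorial sketch): the two flags currently supported
 * map to iproute2's "ip_flags" option, e.g.
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_flags frag/firstfrag action drop
 *
 * sets both key and mask bits for FLOW_DIS_IS_FRAGMENT and
 * FLOW_DIS_FIRST_FRAG via fl_set_key_flag() above.
 */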

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}
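
/* Illustrative usage (editorial sketch): both the outer and the
 * encapsulated IP header fields use this helper, e.g.
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ip_tos 0x20/0xe0 ip_ttl 64 action drop
 *
 * where the value after '/' becomes the mask passed in here.
 */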

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}
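
/* Illustrative usage (editorial sketch): iproute2 encodes a geneve option
 * as CLASS:TYPE:DATA with an optional mask after '/', e.g.
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       geneve_opts 0102:80:00880022/ffff:ff:ffffffff \
 *       enc_key_id 100 enc_dst_port 6081 action drop
 *
 * so this function runs once for the key and once for the mask.
 */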

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
		md->gbp &= VXLAN_GBP_MASK;
	}

	return sizeof(*md);
}
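
/* Illustrative usage (editorial sketch): the only vxlan option today is
 * the group-based policy ID, written in iproute2 as e.g.
 *
 *   ... flower vxlan_opts 01/ff enc_dst_port 4789 ...
 *
 * where both value and mask are clamped with VXLAN_GBP_MASK above.
 */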

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			memset(&md->u, 0x00, sizeof(md->u));
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}
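
/* Illustrative usage (editorial sketch): iproute2 encodes erspan options
 * as VER:INDEX:DIR:HWID, where INDEX applies to version 1 and DIR/HWID to
 * version 2, e.g.
 *
 *   ... flower erspan_opts 1:1234:0:0 ...   (v1, index 1234)
 *   ... flower erspan_opts 2:0:1:4 ...      (v2, dir 1, hwid 4)
 */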

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
			  int depth, int option_len,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
	struct gtp_pdu_session_info *sinfo;
	u8 len = key->enc_opts.len;
	int err;

	sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
	memset(sinfo, 0xff, option_len);

	if (!depth)
		return sizeof(*sinfo);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
		NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
			       gtp_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Missing tunnel key gtp option pdu type or qfi");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
		sinfo->pdu_type =
			nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
		sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

	return sizeof(*sinfo);
}
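
/* Illustrative usage (editorial sketch, assuming an iproute2 version with
 * GTP support): options are written as PDU_TYPE:QFI, e.g.
 *
 *   ... flower gtp_opts 1:8/ff:ff enc_dst_port 2152 ...
 *
 * filling pdu_type and qfi of the gtp_pdu_session_info above.
 */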

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
			return -EINVAL;
		}
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_GTP:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Duplicate type for gtp options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_key, key,
						    key_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
			option_len = fl_set_gtp_opt(nla_opt_msk, mask,
						    msk_depth, option_len,
						    extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Key and mask miss aligned");
				return -EINVAL;
			}
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}

		if (!msk_depth)
			continue;

		if (!nla_ok(nla_opt_msk, msk_depth)) {
			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
			return -EINVAL;
		}
		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
	}

	return 0;
}

static int fl_validate_ct_state(u16 state, struct nlattr *tb,
				struct netlink_ext_ack *extack)
{
	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "no trk, so no other flag can be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and est are mutually exclusive");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "when inv is set, only trk may be set");
		return -EINVAL;
	}

	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "new and rpl are mutually exclusive");
		return -EINVAL;
	}

	return 0;
}
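
/* Editorial examples of the rules above (iproute2 ct_state notation):
 * "+trk+est", "+trk+new" and "+trk+inv" are accepted, while "+est"
 * (no +trk), "+trk+new+est", "+trk+inv+rpl" and "+trk+new+rpl" are all
 * rejected.
 */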

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		int err;

		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));

		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
					   extack);
		if (err)
			return err;
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}
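
/* Illustrative usage (editorial sketch, on a kernel built with conntrack):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       ct_state +trk+est ct_zone 1 ct_mark 0xbb/0xff action pass
 *
 * exercises the state, zone and mark keys parsed above; each key is
 * rejected with -EOPNOTSUPP when its conntrack feature is compiled out.
 */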
1627 
1628 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1629 			struct fl_flow_key *key, struct fl_flow_key *mask,
1630 			int vthresh)
1631 {
1632 	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1633 
1634 	if (!tb) {
1635 		*ethertype = 0;
1636 		return good_num_of_vlans;
1637 	}
1638 
1639 	*ethertype = nla_get_be16(tb);
1640 	if (good_num_of_vlans || eth_type_vlan(*ethertype))
1641 		return true;
1642 
1643 	key->basic.n_proto = *ethertype;
1644 	mask->basic.n_proto = cpu_to_be16(~0);
1645 	return false;
1646 }
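
/*
 * Editor's note (annotation, not part of the upstream source):
 * is_vlan_key() has three outcomes. Without the ethertype attribute it
 * returns true only when more VLAN tags than vthresh are matched via
 * num_of_vlans; with the attribute present it returns true for VLAN
 * ethertypes (802.1Q/802.1AD) so parsing can descend one tag deeper,
 * and otherwise records an exact-match n_proto key and returns false,
 * ending VLAN parsing.
 */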
1647 
1648 static int fl_set_key(struct net *net, struct nlattr **tb,
1649 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1650 		      struct netlink_ext_ack *extack)
1651 {
1652 	__be16 ethertype;
1653 	int ret = 0;
1654 
1655 	if (tb[TCA_FLOWER_INDEV]) {
1656 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1657 		if (err < 0)
1658 			return err;
1659 		key->meta.ingress_ifindex = err;
1660 		mask->meta.ingress_ifindex = 0xffffffff;
1661 	}
1662 
1663 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1664 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1665 		       sizeof(key->eth.dst));
1666 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1667 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1668 		       sizeof(key->eth.src));
1669 	fl_set_key_val(tb, &key->num_of_vlans,
1670 		       TCA_FLOWER_KEY_NUM_OF_VLANS,
1671 		       &mask->num_of_vlans,
1672 		       TCA_FLOWER_UNSPEC,
1673 		       sizeof(key->num_of_vlans));
1674 
1675 	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1676 		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1677 				TCA_FLOWER_KEY_VLAN_PRIO,
1678 				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1679 				&key->vlan, &mask->vlan);
1680 
1681 		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1682 				&ethertype, key, mask, 1)) {
1683 			fl_set_key_vlan(tb, ethertype,
1684 					TCA_FLOWER_KEY_CVLAN_ID,
1685 					TCA_FLOWER_KEY_CVLAN_PRIO,
1686 					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1687 					&key->cvlan, &mask->cvlan);
1688 			fl_set_key_val(tb, &key->basic.n_proto,
1689 				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1690 				       &mask->basic.n_proto,
1691 				       TCA_FLOWER_UNSPEC,
1692 				       sizeof(key->basic.n_proto));
1693 		}
1694 	}
1695 
1696 	if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1697 		fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1698 
1699 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1700 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1701 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1702 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1703 			       sizeof(key->basic.ip_proto));
1704 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1705 	}
1706 
1707 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1708 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1709 		mask->control.addr_type = ~0;
1710 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1711 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1712 			       sizeof(key->ipv4.src));
1713 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1714 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1715 			       sizeof(key->ipv4.dst));
1716 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1717 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1718 		mask->control.addr_type = ~0;
1719 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1720 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1721 			       sizeof(key->ipv6.src));
1722 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1723 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1724 			       sizeof(key->ipv6.dst));
1725 	}
1726 
1727 	if (key->basic.ip_proto == IPPROTO_TCP) {
1728 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1729 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1730 			       sizeof(key->tp.src));
1731 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1732 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1733 			       sizeof(key->tp.dst));
1734 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1735 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1736 			       sizeof(key->tcp.flags));
1737 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1738 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1739 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1740 			       sizeof(key->tp.src));
1741 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1742 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1743 			       sizeof(key->tp.dst));
1744 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1745 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1746 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1747 			       sizeof(key->tp.src));
1748 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1749 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1750 			       sizeof(key->tp.dst));
1751 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1752 		   key->basic.ip_proto == IPPROTO_ICMP) {
1753 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1754 			       &mask->icmp.type,
1755 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1756 			       sizeof(key->icmp.type));
1757 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1758 			       &mask->icmp.code,
1759 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1760 			       sizeof(key->icmp.code));
1761 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1762 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1763 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1764 			       &mask->icmp.type,
1765 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1766 			       sizeof(key->icmp.type));
1767 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1768 			       &mask->icmp.code,
1769 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1770 			       sizeof(key->icmp.code));
1771 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1772 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1773 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1774 		if (ret)
1775 			return ret;
1776 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1777 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1778 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1779 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1780 			       sizeof(key->arp.sip));
1781 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1782 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1783 			       sizeof(key->arp.tip));
1784 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1785 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1786 			       sizeof(key->arp.op));
1787 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1788 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1789 			       sizeof(key->arp.sha));
1790 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1791 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1792 			       sizeof(key->arp.tha));
1793 	}
1794 
1795 	if (key->basic.ip_proto == IPPROTO_TCP ||
1796 	    key->basic.ip_proto == IPPROTO_UDP ||
1797 	    key->basic.ip_proto == IPPROTO_SCTP) {
1798 		ret = fl_set_key_port_range(tb, key, mask, extack);
1799 		if (ret)
1800 			return ret;
1801 	}
1802 
1803 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1804 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1805 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1806 		mask->enc_control.addr_type = ~0;
1807 		fl_set_key_val(tb, &key->enc_ipv4.src,
1808 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1809 			       &mask->enc_ipv4.src,
1810 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1811 			       sizeof(key->enc_ipv4.src));
1812 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1813 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1814 			       &mask->enc_ipv4.dst,
1815 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1816 			       sizeof(key->enc_ipv4.dst));
1817 	}
1818 
1819 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1820 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1821 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1822 		mask->enc_control.addr_type = ~0;
1823 		fl_set_key_val(tb, &key->enc_ipv6.src,
1824 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1825 			       &mask->enc_ipv6.src,
1826 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1827 			       sizeof(key->enc_ipv6.src));
1828 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1829 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1830 			       &mask->enc_ipv6.dst,
1831 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1832 			       sizeof(key->enc_ipv6.dst));
1833 	}
1834 
1835 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1836 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1837 		       sizeof(key->enc_key_id.keyid));
1838 
1839 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1840 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1841 		       sizeof(key->enc_tp.src));
1842 
1843 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1844 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1845 		       sizeof(key->enc_tp.dst));
1846 
1847 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1848 
1849 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1850 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1851 		       sizeof(key->hash.hash));
1852 
1853 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1854 		ret = fl_set_enc_opt(tb, key, mask, extack);
1855 		if (ret)
1856 			return ret;
1857 	}
1858 
1859 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1860 	if (ret)
1861 		return ret;
1862 
1863 	if (tb[TCA_FLOWER_KEY_FLAGS])
1864 		ret = fl_set_key_flags(tb, &key->control.flags,
1865 				       &mask->control.flags, extack);
1866 
1867 	return ret;
1868 }
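
/*
 * Usage sketch (editor's annotation; iproute2 syntax): one rule that
 * exercises several of the keys populated above:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_ip 192.0.2.0/24 dst_port 80 action drop
 *
 * fl_set_key() fills basic.n_proto, basic.ip_proto, ipv4.dst and
 * tp.dst, and the /24 prefix arrives as TCA_FLOWER_KEY_IPV4_DST_MASK.
 */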
1869 
1870 static void fl_mask_copy(struct fl_flow_mask *dst,
1871 			 struct fl_flow_mask *src)
1872 {
1873 	const void *psrc = fl_key_get_start(&src->key, src);
1874 	void *pdst = fl_key_get_start(&dst->key, src);
1875 
1876 	memcpy(pdst, psrc, fl_mask_range(src));
1877 	dst->range = src->range;
1878 }
1879 
1880 static const struct rhashtable_params fl_ht_params = {
1881 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1882 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1883 	.automatic_shrinking = true,
1884 };
1885 
1886 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1887 {
1888 	mask->filter_ht_params = fl_ht_params;
1889 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1890 	mask->filter_ht_params.key_offset += mask->range.start;
1891 
1892 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1893 }
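
/*
 * Editor's note (annotation, not part of the upstream source): each
 * mask gets a filter hashtable keyed only over its masked byte range.
 * If a mask covers bytes [16, 48) of struct fl_flow_key, key_len
 * becomes 32 and key_offset points at mkey + 16, so filters sharing
 * the mask hash and compare only the bytes that can actually differ.
 */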
1894 
1895 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1896 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1897 
1898 #define FL_KEY_IS_MASKED(mask, member)						\
1899 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1900 		   0, FL_KEY_MEMBER_SIZE(member))
1901 
1902 #define FL_KEY_SET(keys, cnt, id, member)					\
1903 	do {									\
1904 		keys[cnt].key_id = id;						\
1905 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1906 		cnt++;								\
1907 	} while (0)
1908 
1909 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1910 	do {									\
1911 		if (FL_KEY_IS_MASKED(mask, member))				\
1912 			FL_KEY_SET(keys, cnt, id, member);			\
1913 	} while (0)
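
/*
 * Editor's note (annotation, not part of the upstream source):
 * expansion sketch for one member:
 *
 *   FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_TCP, tcp)
 *
 * appends a {key_id, offset} entry only when memchr_inv() finds a
 * nonzero byte in mask->tcp, i.e. when the filter really matches on
 * TCP flags.
 */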
1914 
1915 static void fl_init_dissector(struct flow_dissector *dissector,
1916 			      struct fl_flow_key *mask)
1917 {
1918 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1919 	size_t cnt = 0;
1920 
1921 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1922 			     FLOW_DISSECTOR_KEY_META, meta);
1923 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1924 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1925 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1926 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1927 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1928 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1929 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1930 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1931 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1932 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1933 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1934 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1935 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1936 			     FLOW_DISSECTOR_KEY_IP, ip);
1937 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1938 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1939 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1940 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1941 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1942 			     FLOW_DISSECTOR_KEY_ARP, arp);
1943 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1944 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1945 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1946 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1947 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1948 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1949 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1950 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1951 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1952 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1953 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1954 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1955 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1956 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1957 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1958 			   enc_control);
1959 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1960 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1961 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1962 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1963 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1964 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1965 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1966 			     FLOW_DISSECTOR_KEY_CT, ct);
1967 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1968 			     FLOW_DISSECTOR_KEY_HASH, hash);
1969 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1970 			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
1971 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1972 			     FLOW_DISSECTOR_KEY_PPPOE, pppoe);
1973 
1974 	skb_flow_dissector_init(dissector, keys, cnt);
1975 }
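
/*
 * Editor's note (annotation, not part of the upstream source):
 * CONTROL and BASIC are always registered; everything else is
 * conditional. A filter matching only dst_ip therefore initializes
 * the dissector with just CONTROL, BASIC and IPV4_ADDRS (plus META
 * when an indev was given), keeping per-packet dissection minimal.
 */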
1976 
1977 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1978 					       struct fl_flow_mask *mask)
1979 {
1980 	struct fl_flow_mask *newmask;
1981 	int err;
1982 
1983 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1984 	if (!newmask)
1985 		return ERR_PTR(-ENOMEM);
1986 
1987 	fl_mask_copy(newmask, mask);
1988 
1989 	if ((newmask->key.tp_range.tp_min.dst &&
1990 	     newmask->key.tp_range.tp_max.dst) ||
1991 	    (newmask->key.tp_range.tp_min.src &&
1992 	     newmask->key.tp_range.tp_max.src))
1993 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1994 
1995 	err = fl_init_mask_hashtable(newmask);
1996 	if (err)
1997 		goto errout_free;
1998 
1999 	fl_init_dissector(&newmask->dissector, &newmask->key);
2000 
2001 	INIT_LIST_HEAD_RCU(&newmask->filters);
2002 
2003 	refcount_set(&newmask->refcnt, 1);
2004 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2005 				      &newmask->ht_node, mask_ht_params);
2006 	if (err)
2007 		goto errout_destroy;
2008 
2009 	spin_lock(&head->masks_lock);
2010 	list_add_tail_rcu(&newmask->list, &head->masks);
2011 	spin_unlock(&head->masks_lock);
2012 
2013 	return newmask;
2014 
2015 errout_destroy:
2016 	rhashtable_destroy(&newmask->ht);
2017 errout_free:
2018 	kfree(newmask);
2019 
2020 	return ERR_PTR(err);
2021 }
2022 
2023 static int fl_check_assign_mask(struct cls_fl_head *head,
2024 				struct cls_fl_filter *fnew,
2025 				struct cls_fl_filter *fold,
2026 				struct fl_flow_mask *mask)
2027 {
2028 	struct fl_flow_mask *newmask;
2029 	int ret = 0;
2030 
2031 	rcu_read_lock();
2032 
2033 	/* Insert the mask as a temporary node to prevent concurrent creation
2034 	 * of a mask with the same key. Any concurrent lookup with the same key
2035 	 * will return -EAGAIN because the mask's refcnt is still zero.
2036 	 */
2037 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2038 						       &mask->ht_node,
2039 						       mask_ht_params);
2040 	if (!fnew->mask) {
2041 		rcu_read_unlock();
2042 
2043 		if (fold) {
2044 			ret = -EINVAL;
2045 			goto errout_cleanup;
2046 		}
2047 
2048 		newmask = fl_create_new_mask(head, mask);
2049 		if (IS_ERR(newmask)) {
2050 			ret = PTR_ERR(newmask);
2051 			goto errout_cleanup;
2052 		}
2053 
2054 		fnew->mask = newmask;
2055 		return 0;
2056 	} else if (IS_ERR(fnew->mask)) {
2057 		ret = PTR_ERR(fnew->mask);
2058 	} else if (fold && fold->mask != fnew->mask) {
2059 		ret = -EINVAL;
2060 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2061 		/* Mask was deleted concurrently, try again */
2062 		ret = -EAGAIN;
2063 	}
2064 	rcu_read_unlock();
2065 	return ret;
2066 
2067 errout_cleanup:
2068 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
2069 			       mask_ht_params);
2070 	return ret;
2071 }
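
/*
 * Editor's note (annotation, not part of the upstream source): one
 * interleaving the temporary-node trick above guards against:
 *
 *   task A: lookup_get_insert() -> NULL, starts fl_create_new_mask()
 *   task B: lookup_get_insert() -> finds A's temporary node, whose
 *           refcnt is still zero, so refcount_inc_not_zero() fails
 *           and B returns -EAGAIN to retry
 *
 * Only after fl_create_new_mask() replaces the temporary node with the
 * initialized mask (refcnt == 1) can other tasks share it.
 */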
2072 
2073 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
2074 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
2075 			unsigned long base, struct nlattr **tb,
2076 			struct nlattr *est,
2077 			struct fl_flow_tmplt *tmplt,
2078 			u32 flags, u32 fl_flags,
2079 			struct netlink_ext_ack *extack)
2080 {
2081 	int err;
2082 
2083 	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
2084 				   fl_flags, extack);
2085 	if (err < 0)
2086 		return err;
2087 
2088 	if (tb[TCA_FLOWER_CLASSID]) {
2089 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2090 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2091 			rtnl_lock();
2092 		tcf_bind_filter(tp, &f->res, base);
2093 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
2094 			rtnl_unlock();
2095 	}
2096 
2097 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
2098 	if (err)
2099 		return err;
2100 
2101 	fl_mask_update_range(mask);
2102 	fl_set_masked_key(&f->mkey, &f->key, mask);
2103 
2104 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
2105 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2106 		return -EINVAL;
2107 	}
2108 
2109 	return 0;
2110 }
2111 
2112 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2113 			       struct cls_fl_filter *fold,
2114 			       bool *in_ht)
2115 {
2116 	struct fl_flow_mask *mask = fnew->mask;
2117 	int err;
2118 
2119 	err = rhashtable_lookup_insert_fast(&mask->ht,
2120 					    &fnew->ht_node,
2121 					    mask->filter_ht_params);
2122 	if (err) {
2123 		*in_ht = false;
2124 		/* It is okay if a filter with the same key already
2125 		 * exists when overwriting.
2126 		 */
2127 		return fold && err == -EEXIST ? 0 : err;
2128 	}
2129 
2130 	*in_ht = true;
2131 	return 0;
2132 }
2133 
2134 static int fl_change(struct net *net, struct sk_buff *in_skb,
2135 		     struct tcf_proto *tp, unsigned long base,
2136 		     u32 handle, struct nlattr **tca,
2137 		     void **arg, u32 flags,
2138 		     struct netlink_ext_ack *extack)
2139 {
2140 	struct cls_fl_head *head = fl_head_dereference(tp);
2141 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2142 	struct cls_fl_filter *fold = *arg;
2143 	struct cls_fl_filter *fnew;
2144 	struct fl_flow_mask *mask;
2145 	struct nlattr **tb;
2146 	bool in_ht;
2147 	int err;
2148 
2149 	if (!tca[TCA_OPTIONS]) {
2150 		err = -EINVAL;
2151 		goto errout_fold;
2152 	}
2153 
2154 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2155 	if (!mask) {
2156 		err = -ENOBUFS;
2157 		goto errout_fold;
2158 	}
2159 
2160 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2161 	if (!tb) {
2162 		err = -ENOBUFS;
2163 		goto errout_mask_alloc;
2164 	}
2165 
2166 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2167 					  tca[TCA_OPTIONS], fl_policy, NULL);
2168 	if (err < 0)
2169 		goto errout_tb;
2170 
2171 	if (fold && handle && fold->handle != handle) {
2172 		err = -EINVAL;
2173 		goto errout_tb;
2174 	}
2175 
2176 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2177 	if (!fnew) {
2178 		err = -ENOBUFS;
2179 		goto errout_tb;
2180 	}
2181 	INIT_LIST_HEAD(&fnew->hw_list);
2182 	refcount_set(&fnew->refcnt, 1);
2183 
2184 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2185 	if (err < 0)
2186 		goto errout;
2187 
2188 	if (tb[TCA_FLOWER_FLAGS]) {
2189 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2190 
2191 		if (!tc_flags_valid(fnew->flags)) {
2192 			err = -EINVAL;
2193 			goto errout;
2194 		}
2195 	}
2196 
2197 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2198 			   tp->chain->tmplt_priv, flags, fnew->flags,
2199 			   extack);
2200 	if (err)
2201 		goto errout;
2202 
2203 	err = fl_check_assign_mask(head, fnew, fold, mask);
2204 	if (err)
2205 		goto errout;
2206 
2207 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2208 	if (err)
2209 		goto errout_mask;
2210 
2211 	if (!tc_skip_hw(fnew->flags)) {
2212 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2213 		if (err)
2214 			goto errout_ht;
2215 	}
2216 
2217 	if (!tc_in_hw(fnew->flags))
2218 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2219 
2220 	spin_lock(&tp->lock);
2221 
2222 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2223 	 * up the proto again or create a new one, if necessary.
2224 	 */
2225 	if (tp->deleting) {
2226 		err = -EAGAIN;
2227 		goto errout_hw;
2228 	}
2229 
2230 	if (fold) {
2231 		/* Fold filter was deleted concurrently. Retry lookup. */
2232 		if (fold->deleted) {
2233 			err = -EAGAIN;
2234 			goto errout_hw;
2235 		}
2236 
2237 		fnew->handle = handle;
2238 
2239 		if (!in_ht) {
2240 			struct rhashtable_params params =
2241 				fnew->mask->filter_ht_params;
2242 
2243 			err = rhashtable_insert_fast(&fnew->mask->ht,
2244 						     &fnew->ht_node,
2245 						     params);
2246 			if (err)
2247 				goto errout_hw;
2248 			in_ht = true;
2249 		}
2250 
2251 		refcount_inc(&fnew->refcnt);
2252 		rhashtable_remove_fast(&fold->mask->ht,
2253 				       &fold->ht_node,
2254 				       fold->mask->filter_ht_params);
2255 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2256 		list_replace_rcu(&fold->list, &fnew->list);
2257 		fold->deleted = true;
2258 
2259 		spin_unlock(&tp->lock);
2260 
2261 		fl_mask_put(head, fold->mask);
2262 		if (!tc_skip_hw(fold->flags))
2263 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2264 		tcf_unbind_filter(tp, &fold->res);
2265 		/* Caller holds reference to fold, so refcnt is always > 0
2266 		 * after this.
2267 		 */
2268 		refcount_dec(&fold->refcnt);
2269 		__fl_put(fold);
2270 	} else {
2271 		if (handle) {
2272 			/* The user specified a handle and it doesn't exist yet. */
2273 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2274 					    handle, GFP_ATOMIC);
2275 
2276 			/* A filter with the specified handle was concurrently
2277 			 * inserted after the initial check in cls_api. This is
2278 			 * not necessarily an error if NLM_F_EXCL is not set in
2279 			 * the message flags. Returning -EAGAIN makes cls_api
2280 			 * try to update the concurrently inserted rule.
2281 			 */
2282 			if (err == -ENOSPC)
2283 				err = -EAGAIN;
2284 		} else {
2285 			handle = 1;
2286 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2287 					    INT_MAX, GFP_ATOMIC);
2288 		}
2289 		if (err)
2290 			goto errout_hw;
2291 
2292 		refcount_inc(&fnew->refcnt);
2293 		fnew->handle = handle;
2294 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2295 		spin_unlock(&tp->lock);
2296 	}
2297 
2298 	*arg = fnew;
2299 
2300 	kfree(tb);
2301 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2302 	return 0;
2303 
2304 errout_ht:
2305 	spin_lock(&tp->lock);
2306 errout_hw:
2307 	fnew->deleted = true;
2308 	spin_unlock(&tp->lock);
2309 	if (!tc_skip_hw(fnew->flags))
2310 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2311 	if (in_ht)
2312 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2313 				       fnew->mask->filter_ht_params);
2314 errout_mask:
2315 	fl_mask_put(head, fnew->mask);
2316 errout:
2317 	__fl_put(fnew);
2318 errout_tb:
2319 	kfree(tb);
2320 errout_mask_alloc:
2321 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2322 errout_fold:
2323 	if (fold)
2324 		__fl_put(fold);
2325 	return err;
2326 }
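
/*
 * Editor's summary (annotation, not part of the upstream source) of
 * fl_change():
 *   1. parse TCA_OPTIONS into tb[] and allocate fnew plus a scratch
 *      mask;
 *   2. fl_set_parms() fills key/mask, fl_check_assign_mask() finds or
 *      creates the shared mask;
 *   3. insert into the per-mask hashtable and offload to hardware
 *      unless skip_hw, then publish under tp->lock, either replacing
 *      fold or allocating a new handle;
 *   4. every error label unwinds exactly the steps taken before it.
 */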
2327 
2328 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2329 		     bool rtnl_held, struct netlink_ext_ack *extack)
2330 {
2331 	struct cls_fl_head *head = fl_head_dereference(tp);
2332 	struct cls_fl_filter *f = arg;
2333 	bool last_on_mask;
2334 	int err = 0;
2335 
2336 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2337 	*last = list_empty(&head->masks);
2338 	__fl_put(f);
2339 
2340 	return err;
2341 }
2342 
2343 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2344 		    bool rtnl_held)
2345 {
2346 	struct cls_fl_head *head = fl_head_dereference(tp);
2347 	unsigned long id = arg->cookie, tmp;
2348 	struct cls_fl_filter *f;
2349 
2350 	arg->count = arg->skip;
2351 
2352 	rcu_read_lock();
2353 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2354 		/* don't return filters that are being deleted */
2355 		if (!refcount_inc_not_zero(&f->refcnt))
2356 			continue;
2357 		rcu_read_unlock();
2358 
2359 		if (arg->fn(tp, f, arg) < 0) {
2360 			__fl_put(f);
2361 			arg->stop = 1;
2362 			rcu_read_lock();
2363 			break;
2364 		}
2365 		__fl_put(f);
2366 		arg->count++;
2367 		rcu_read_lock();
2368 	}
2369 	rcu_read_unlock();
2370 	arg->cookie = id;
2371 }
2372 
2373 static struct cls_fl_filter *
2374 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2375 {
2376 	struct cls_fl_head *head = fl_head_dereference(tp);
2377 
2378 	spin_lock(&tp->lock);
2379 	if (list_empty(&head->hw_filters)) {
2380 		spin_unlock(&tp->lock);
2381 		return NULL;
2382 	}
2383 
2384 	if (!f)
2385 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2386 			       hw_list);
2387 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2388 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2389 			spin_unlock(&tp->lock);
2390 			return f;
2391 		}
2392 	}
2393 
2394 	spin_unlock(&tp->lock);
2395 	return NULL;
2396 }
2397 
2398 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2399 			void *cb_priv, struct netlink_ext_ack *extack)
2400 {
2401 	struct tcf_block *block = tp->chain->block;
2402 	struct flow_cls_offload cls_flower = {};
2403 	struct cls_fl_filter *f = NULL;
2404 	int err;
2405 
2406 	/* hw_filters list can only be changed by hw offload functions after
2407 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2408 	 * iterating it.
2409 	 */
2410 	ASSERT_RTNL();
2411 
2412 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2413 		cls_flower.rule =
2414 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2415 		if (!cls_flower.rule) {
2416 			__fl_put(f);
2417 			return -ENOMEM;
2418 		}
2419 
2420 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2421 					   extack);
2422 		cls_flower.command = add ?
2423 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2424 		cls_flower.cookie = (unsigned long)f;
2425 		cls_flower.rule->match.dissector = &f->mask->dissector;
2426 		cls_flower.rule->match.mask = &f->mask->key;
2427 		cls_flower.rule->match.key = &f->mkey;
2428 
2429 		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2430 					      cls_flower.common.extack);
2431 		if (err) {
2432 			kfree(cls_flower.rule);
2433 			if (tc_skip_sw(f->flags)) {
2434 				__fl_put(f);
2435 				return err;
2436 			}
2437 			goto next_flow;
2438 		}
2439 
2440 		cls_flower.classid = f->res.classid;
2441 
2442 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2443 					    TC_SETUP_CLSFLOWER, &cls_flower,
2444 					    cb_priv, &f->flags,
2445 					    &f->in_hw_count);
2446 		tc_cleanup_offload_action(&cls_flower.rule->action);
2447 		kfree(cls_flower.rule);
2448 
2449 		if (err) {
2450 			__fl_put(f);
2451 			return err;
2452 		}
2453 next_flow:
2454 		__fl_put(f);
2455 	}
2456 
2457 	return 0;
2458 }
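
/*
 * Editor's note (annotation, not part of the upstream source): when
 * replaying filters to a callback, a failure to translate the actions
 * is fatal only for skip_sw filters (they would otherwise match
 * nowhere); filters with a software path just move on to the next
 * entry.
 */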
2459 
2460 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2461 {
2462 	struct flow_cls_offload *cls_flower = type_data;
2463 	struct cls_fl_filter *f =
2464 		(struct cls_fl_filter *) cls_flower->cookie;
2465 	struct cls_fl_head *head = fl_head_dereference(tp);
2466 
2467 	spin_lock(&tp->lock);
2468 	list_add(&f->hw_list, &head->hw_filters);
2469 	spin_unlock(&tp->lock);
2470 }
2471 
2472 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2473 {
2474 	struct flow_cls_offload *cls_flower = type_data;
2475 	struct cls_fl_filter *f =
2476 		(struct cls_fl_filter *) cls_flower->cookie;
2477 
2478 	spin_lock(&tp->lock);
2479 	if (!list_empty(&f->hw_list))
2480 		list_del_init(&f->hw_list);
2481 	spin_unlock(&tp->lock);
2482 }
2483 
2484 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2485 			      struct fl_flow_tmplt *tmplt)
2486 {
2487 	struct flow_cls_offload cls_flower = {};
2488 	struct tcf_block *block = chain->block;
2489 
2490 	cls_flower.rule = flow_rule_alloc(0);
2491 	if (!cls_flower.rule)
2492 		return -ENOMEM;
2493 
2494 	cls_flower.common.chain_index = chain->index;
2495 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2496 	cls_flower.cookie = (unsigned long) tmplt;
2497 	cls_flower.rule->match.dissector = &tmplt->dissector;
2498 	cls_flower.rule->match.mask = &tmplt->mask;
2499 	cls_flower.rule->match.key = &tmplt->dummy_key;
2500 
2501 	/* We don't care if a driver (any of them) fails to handle this
2502 	 * call; it serves just as a hint for the driver.
2503 	 */
2504 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2505 	kfree(cls_flower.rule);
2506 
2507 	return 0;
2508 }
2509 
2510 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2511 				struct fl_flow_tmplt *tmplt)
2512 {
2513 	struct flow_cls_offload cls_flower = {};
2514 	struct tcf_block *block = chain->block;
2515 
2516 	cls_flower.common.chain_index = chain->index;
2517 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2518 	cls_flower.cookie = (unsigned long) tmplt;
2519 
2520 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2521 }
2522 
2523 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2524 			     struct nlattr **tca,
2525 			     struct netlink_ext_ack *extack)
2526 {
2527 	struct fl_flow_tmplt *tmplt;
2528 	struct nlattr **tb;
2529 	int err;
2530 
2531 	if (!tca[TCA_OPTIONS])
2532 		return ERR_PTR(-EINVAL);
2533 
2534 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2535 	if (!tb)
2536 		return ERR_PTR(-ENOBUFS);
2537 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2538 					  tca[TCA_OPTIONS], fl_policy, NULL);
2539 	if (err)
2540 		goto errout_tb;
2541 
2542 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2543 	if (!tmplt) {
2544 		err = -ENOMEM;
2545 		goto errout_tb;
2546 	}
2547 	tmplt->chain = chain;
2548 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2549 	if (err)
2550 		goto errout_tmplt;
2551 
2552 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2553 
2554 	err = fl_hw_create_tmplt(chain, tmplt);
2555 	if (err)
2556 		goto errout_tmplt;
2557 
2558 	kfree(tb);
2559 	return tmplt;
2560 
2561 errout_tmplt:
2562 	kfree(tmplt);
2563 errout_tb:
2564 	kfree(tb);
2565 	return ERR_PTR(err);
2566 }
2567 
2568 static void fl_tmplt_destroy(void *tmplt_priv)
2569 {
2570 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2571 
2572 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2573 	kfree(tmplt);
2574 }
2575 
2576 static int fl_dump_key_val(struct sk_buff *skb,
2577 			   void *val, int val_type,
2578 			   void *mask, int mask_type, int len)
2579 {
2580 	int err;
2581 
2582 	if (!memchr_inv(mask, 0, len))
2583 		return 0;
2584 	err = nla_put(skb, val_type, len, val);
2585 	if (err)
2586 		return err;
2587 	if (mask_type != TCA_FLOWER_UNSPEC) {
2588 		err = nla_put(skb, mask_type, len, mask);
2589 		if (err)
2590 			return err;
2591 	}
2592 	return 0;
2593 }
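
/*
 * Editor's note (annotation, not part of the upstream source): dump
 * counterpart of fl_set_key_val(). An all-zero mask means "not matched
 * on" and emits nothing; otherwise both value and mask are dumped,
 * except for keys whose mask_type is TCA_FLOWER_UNSPEC (those have no
 * separate mask attribute, e.g. TCA_FLOWER_KEY_ETH_TYPE).
 */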
2594 
2595 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2596 				  struct fl_flow_key *mask)
2597 {
2598 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2599 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2600 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2601 			    sizeof(key->tp_range.tp_min.dst)) ||
2602 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2603 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2604 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2605 			    sizeof(key->tp_range.tp_max.dst)) ||
2606 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2607 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2608 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2609 			    sizeof(key->tp_range.tp_min.src)) ||
2610 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2611 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2612 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2613 			    sizeof(key->tp_range.tp_max.src)))
2614 		return -1;
2615 
2616 	return 0;
2617 }
2618 
2619 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2620 				    struct flow_dissector_key_mpls *mpls_key,
2621 				    struct flow_dissector_key_mpls *mpls_mask,
2622 				    u8 lse_index)
2623 {
2624 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2625 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2626 	int err;
2627 
2628 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2629 			 lse_index + 1);
2630 	if (err)
2631 		return err;
2632 
2633 	if (lse_mask->mpls_ttl) {
2634 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2635 				 lse_key->mpls_ttl);
2636 		if (err)
2637 			return err;
2638 	}
2639 	if (lse_mask->mpls_bos) {
2640 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2641 				 lse_key->mpls_bos);
2642 		if (err)
2643 			return err;
2644 	}
2645 	if (lse_mask->mpls_tc) {
2646 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2647 				 lse_key->mpls_tc);
2648 		if (err)
2649 			return err;
2650 	}
2651 	if (lse_mask->mpls_label) {
2652 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2653 				  lse_key->mpls_label);
2654 		if (err)
2655 			return err;
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2662 				 struct flow_dissector_key_mpls *mpls_key,
2663 				 struct flow_dissector_key_mpls *mpls_mask)
2664 {
2665 	struct nlattr *opts;
2666 	struct nlattr *lse;
2667 	u8 lse_index;
2668 	int err;
2669 
2670 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2671 	if (!opts)
2672 		return -EMSGSIZE;
2673 
2674 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2675 		if (!(mpls_mask->used_lses & 1 << lse_index))
2676 			continue;
2677 
2678 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2679 		if (!lse) {
2680 			err = -EMSGSIZE;
2681 			goto err_opts;
2682 		}
2683 
2684 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2685 					       lse_index);
2686 		if (err)
2687 			goto err_opts_lse;
2688 		nla_nest_end(skb, lse);
2689 	}
2690 	nla_nest_end(skb, opts);
2691 
2692 	return 0;
2693 
2694 err_opts_lse:
2695 	nla_nest_cancel(skb, lse);
2696 err_opts:
2697 	nla_nest_cancel(skb, opts);
2698 
2699 	return err;
2700 }
2701 
2702 static int fl_dump_key_mpls(struct sk_buff *skb,
2703 			    struct flow_dissector_key_mpls *mpls_key,
2704 			    struct flow_dissector_key_mpls *mpls_mask)
2705 {
2706 	struct flow_dissector_mpls_lse *lse_mask;
2707 	struct flow_dissector_mpls_lse *lse_key;
2708 	int err;
2709 
2710 	if (!mpls_mask->used_lses)
2711 		return 0;
2712 
2713 	lse_mask = &mpls_mask->ls[0];
2714 	lse_key = &mpls_key->ls[0];
2715 
2716 	/* For backward compatibility, don't use the MPLS nested attributes if
2717 	 * the rule can be expressed using the old attributes.
2718 	 */
2719 	if (mpls_mask->used_lses & ~1 ||
2720 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2721 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2722 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2723 
2724 	if (lse_mask->mpls_ttl) {
2725 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2726 				 lse_key->mpls_ttl);
2727 		if (err)
2728 			return err;
2729 	}
2730 	if (lse_mask->mpls_tc) {
2731 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2732 				 lse_key->mpls_tc);
2733 		if (err)
2734 			return err;
2735 	}
2736 	if (lse_mask->mpls_label) {
2737 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2738 				  lse_key->mpls_label);
2739 		if (err)
2740 			return err;
2741 	}
2742 	if (lse_mask->mpls_bos) {
2743 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2744 				 lse_key->mpls_bos);
2745 		if (err)
2746 			return err;
2747 	}
2748 	return 0;
2749 }
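
/*
 * Editor's note (annotation, not part of the upstream source):
 * compatibility example. A rule matching only "mpls_ttl 64" on LSE 0
 * is dumped with the legacy TCA_FLOWER_KEY_MPLS_TTL attribute; a rule
 * matching at depth 2 sets bit 1 of used_lses and can only be
 * expressed via the nested TCA_FLOWER_KEY_MPLS_OPTS attributes.
 */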
2750 
2751 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2752 			  struct flow_dissector_key_ip *key,
2753 			  struct flow_dissector_key_ip *mask)
2754 {
2755 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2756 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2757 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2758 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2759 
2760 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2761 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2762 		return -1;
2763 
2764 	return 0;
2765 }
2766 
2767 static int fl_dump_key_vlan(struct sk_buff *skb,
2768 			    int vlan_id_key, int vlan_prio_key,
2769 			    struct flow_dissector_key_vlan *vlan_key,
2770 			    struct flow_dissector_key_vlan *vlan_mask)
2771 {
2772 	int err;
2773 
2774 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2775 		return 0;
2776 	if (vlan_mask->vlan_id) {
2777 		err = nla_put_u16(skb, vlan_id_key,
2778 				  vlan_key->vlan_id);
2779 		if (err)
2780 			return err;
2781 	}
2782 	if (vlan_mask->vlan_priority) {
2783 		err = nla_put_u8(skb, vlan_prio_key,
2784 				 vlan_key->vlan_priority);
2785 		if (err)
2786 			return err;
2787 	}
2788 	return 0;
2789 }
2790 
2791 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2792 			    u32 *flower_key, u32 *flower_mask,
2793 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2794 {
2795 	if (dissector_mask & dissector_flag_bit) {
2796 		*flower_mask |= flower_flag_bit;
2797 		if (dissector_key & dissector_flag_bit)
2798 			*flower_key |= flower_flag_bit;
2799 	}
2800 }
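
/*
 * Editor's note (annotation, not part of the upstream source): with
 * FLOW_DIS_IS_FRAGMENT set in both dissector key and mask, the calls
 * below map it to TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT in the flower
 * key/mask pair; a bit clear in the dissector mask is omitted from
 * both.
 */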
2801 
2802 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2803 {
2804 	u32 key, mask;
2805 	__be32 _key, _mask;
2806 	int err;
2807 
2808 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2809 		return 0;
2810 
2811 	key = 0;
2812 	mask = 0;
2813 
2814 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2815 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2816 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2817 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2818 			FLOW_DIS_FIRST_FRAG);
2819 
2820 	_key = cpu_to_be32(key);
2821 	_mask = cpu_to_be32(mask);
2822 
2823 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2824 	if (err)
2825 		return err;
2826 
2827 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2828 }
2829 
2830 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2831 				  struct flow_dissector_key_enc_opts *enc_opts)
2832 {
2833 	struct geneve_opt *opt;
2834 	struct nlattr *nest;
2835 	int opt_off = 0;
2836 
2837 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2838 	if (!nest)
2839 		goto nla_put_failure;
2840 
2841 	while (enc_opts->len > opt_off) {
2842 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2843 
2844 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2845 				 opt->opt_class))
2846 			goto nla_put_failure;
2847 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2848 			       opt->type))
2849 			goto nla_put_failure;
2850 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2851 			    opt->length * 4, opt->opt_data))
2852 			goto nla_put_failure;
2853 
2854 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2855 	}
2856 	nla_nest_end(skb, nest);
2857 	return 0;
2858 
2859 nla_put_failure:
2860 	nla_nest_cancel(skb, nest);
2861 	return -EMSGSIZE;
2862 }
2863 
2864 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2865 				 struct flow_dissector_key_enc_opts *enc_opts)
2866 {
2867 	struct vxlan_metadata *md;
2868 	struct nlattr *nest;
2869 
2870 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2871 	if (!nest)
2872 		goto nla_put_failure;
2873 
2874 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2875 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2876 		goto nla_put_failure;
2877 
2878 	nla_nest_end(skb, nest);
2879 	return 0;
2880 
2881 nla_put_failure:
2882 	nla_nest_cancel(skb, nest);
2883 	return -EMSGSIZE;
2884 }
2885 
2886 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2887 				  struct flow_dissector_key_enc_opts *enc_opts)
2888 {
2889 	struct erspan_metadata *md;
2890 	struct nlattr *nest;
2891 
2892 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2893 	if (!nest)
2894 		goto nla_put_failure;
2895 
2896 	md = (struct erspan_metadata *)&enc_opts->data[0];
2897 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2898 		goto nla_put_failure;
2899 
2900 	if (md->version == 1 &&
2901 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2902 		goto nla_put_failure;
2903 
2904 	if (md->version == 2 &&
2905 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2906 			md->u.md2.dir) ||
2907 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2908 			get_hwid(&md->u.md2))))
2909 		goto nla_put_failure;
2910 
2911 	nla_nest_end(skb, nest);
2912 	return 0;
2913 
2914 nla_put_failure:
2915 	nla_nest_cancel(skb, nest);
2916 	return -EMSGSIZE;
2917 }
2918 
2919 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
2920 			       struct flow_dissector_key_enc_opts *enc_opts)
2921 
2922 {
2923 	struct gtp_pdu_session_info *session_info;
2924 	struct nlattr *nest;
2925 
2926 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
2927 	if (!nest)
2928 		goto nla_put_failure;
2929 
2930 	session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
2931 
2932 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
2933 		       session_info->pdu_type))
2934 		goto nla_put_failure;
2935 
2936 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
2937 		goto nla_put_failure;
2938 
2939 	nla_nest_end(skb, nest);
2940 	return 0;
2941 
2942 nla_put_failure:
2943 	nla_nest_cancel(skb, nest);
2944 	return -EMSGSIZE;
2945 }
2946 
2947 static int fl_dump_key_ct(struct sk_buff *skb,
2948 			  struct flow_dissector_key_ct *key,
2949 			  struct flow_dissector_key_ct *mask)
2950 {
2951 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2952 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2953 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2954 			    sizeof(key->ct_state)))
2955 		goto nla_put_failure;
2956 
2957 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2958 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2959 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2960 			    sizeof(key->ct_zone)))
2961 		goto nla_put_failure;
2962 
2963 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2964 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2965 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2966 			    sizeof(key->ct_mark)))
2967 		goto nla_put_failure;
2968 
2969 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2970 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2971 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2972 			    sizeof(key->ct_labels)))
2973 		goto nla_put_failure;
2974 
2975 	return 0;
2976 
2977 nla_put_failure:
2978 	return -EMSGSIZE;
2979 }
2980 
2981 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2982 			       struct flow_dissector_key_enc_opts *enc_opts)
2983 {
2984 	struct nlattr *nest;
2985 	int err;
2986 
2987 	if (!enc_opts->len)
2988 		return 0;
2989 
2990 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2991 	if (!nest)
2992 		goto nla_put_failure;
2993 
2994 	switch (enc_opts->dst_opt_type) {
2995 	case TUNNEL_GENEVE_OPT:
2996 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2997 		if (err)
2998 			goto nla_put_failure;
2999 		break;
3000 	case TUNNEL_VXLAN_OPT:
3001 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
3002 		if (err)
3003 			goto nla_put_failure;
3004 		break;
3005 	case TUNNEL_ERSPAN_OPT:
3006 		err = fl_dump_key_erspan_opt(skb, enc_opts);
3007 		if (err)
3008 			goto nla_put_failure;
3009 		break;
3010 	case TUNNEL_GTP_OPT:
3011 		err = fl_dump_key_gtp_opt(skb, enc_opts);
3012 		if (err)
3013 			goto nla_put_failure;
3014 		break;
3015 	default:
3016 		goto nla_put_failure;
3017 	}
3018 	nla_nest_end(skb, nest);
3019 	return 0;
3020 
3021 nla_put_failure:
3022 	nla_nest_cancel(skb, nest);
3023 	return -EMSGSIZE;
3024 }
3025 
3026 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3027 			       struct flow_dissector_key_enc_opts *key_opts,
3028 			       struct flow_dissector_key_enc_opts *msk_opts)
3029 {
3030 	int err;
3031 
3032 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3033 	if (err)
3034 		return err;
3035 
3036 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3037 }
3038 
3039 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3040 		       struct fl_flow_key *key, struct fl_flow_key *mask)
3041 {
3042 	if (mask->meta.ingress_ifindex) {
3043 		struct net_device *dev;
3044 
3045 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3046 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3047 			goto nla_put_failure;
3048 	}
3049 
3050 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3051 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3052 			    sizeof(key->eth.dst)) ||
3053 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3054 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3055 			    sizeof(key->eth.src)) ||
3056 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3057 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3058 			    sizeof(key->basic.n_proto)))
3059 		goto nla_put_failure;
3060 
3061 	if (mask->num_of_vlans.num_of_vlans) {
3062 		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3063 			goto nla_put_failure;
3064 	}
3065 
3066 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3067 		goto nla_put_failure;
3068 
3069 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3070 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3071 		goto nla_put_failure;
3072 
3073 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3074 			     TCA_FLOWER_KEY_CVLAN_PRIO,
3075 			     &key->cvlan, &mask->cvlan) ||
3076 	    (mask->cvlan.vlan_tpid &&
3077 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3078 			  key->cvlan.vlan_tpid)))
3079 		goto nla_put_failure;
3080 
3081 	if (mask->basic.n_proto) {
3082 		if (mask->cvlan.vlan_eth_type) {
3083 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3084 					 key->basic.n_proto))
3085 				goto nla_put_failure;
3086 		} else if (mask->vlan.vlan_eth_type) {
3087 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3088 					 key->vlan.vlan_eth_type))
3089 				goto nla_put_failure;
3090 		}
3091 	}
3092 
3093 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
3094 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
3095 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3096 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3097 			    sizeof(key->basic.ip_proto)) ||
3098 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3099 		goto nla_put_failure;
3100 
3101 	if (mask->pppoe.session_id) {
3102 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3103 				 key->pppoe.session_id))
3104 			goto nla_put_failure;
3105 	}
3106 	if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3107 		if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3108 				 key->pppoe.ppp_proto))
3109 			goto nla_put_failure;
3110 	}
3111 
3112 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3113 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3114 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3115 			     sizeof(key->ipv4.src)) ||
3116 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3117 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3118 			     sizeof(key->ipv4.dst))))
3119 		goto nla_put_failure;
3120 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3121 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3122 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3123 				  sizeof(key->ipv6.src)) ||
3124 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3125 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3126 				  sizeof(key->ipv6.dst))))
3127 		goto nla_put_failure;
3128 
3129 	if (key->basic.ip_proto == IPPROTO_TCP &&
3130 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3131 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3132 			     sizeof(key->tp.src)) ||
3133 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3134 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3135 			     sizeof(key->tp.dst)) ||
3136 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3137 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3138 			     sizeof(key->tcp.flags))))
3139 		goto nla_put_failure;
3140 	else if (key->basic.ip_proto == IPPROTO_UDP &&
3141 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3142 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3143 				  sizeof(key->tp.src)) ||
3144 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3145 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3146 				  sizeof(key->tp.dst))))
3147 		goto nla_put_failure;
3148 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
3149 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3150 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3151 				  sizeof(key->tp.src)) ||
3152 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3153 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3154 				  sizeof(key->tp.dst))))
3155 		goto nla_put_failure;
3156 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
3157 		 key->basic.ip_proto == IPPROTO_ICMP &&
3158 		 (fl_dump_key_val(skb, &key->icmp.type,
3159 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3160 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3161 				  sizeof(key->icmp.type)) ||
3162 		  fl_dump_key_val(skb, &key->icmp.code,
3163 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3164 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3165 				  sizeof(key->icmp.code))))
3166 		goto nla_put_failure;
3167 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3168 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3169 		 (fl_dump_key_val(skb, &key->icmp.type,
3170 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3171 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3172 				  sizeof(key->icmp.type)) ||
3173 		  fl_dump_key_val(skb, &key->icmp.code,
3174 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3175 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3176 				  sizeof(key->icmp.code))))
3177 		goto nla_put_failure;
3178 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3179 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
3180 		 (fl_dump_key_val(skb, &key->arp.sip,
3181 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3182 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
3183 				  sizeof(key->arp.sip)) ||
3184 		  fl_dump_key_val(skb, &key->arp.tip,
3185 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3186 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
3187 				  sizeof(key->arp.tip)) ||
3188 		  fl_dump_key_val(skb, &key->arp.op,
3189 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3190 				  TCA_FLOWER_KEY_ARP_OP_MASK,
3191 				  sizeof(key->arp.op)) ||
3192 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3193 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3194 				  sizeof(key->arp.sha)) ||
3195 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3196 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3197 				  sizeof(key->arp.tha))))
3198 		goto nla_put_failure;
3199 
3200 	if ((key->basic.ip_proto == IPPROTO_TCP ||
3201 	     key->basic.ip_proto == IPPROTO_UDP ||
3202 	     key->basic.ip_proto == IPPROTO_SCTP) &&
3203 	     fl_dump_key_port_range(skb, key, mask))
3204 		goto nla_put_failure;
3205 
3206 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3207 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
3208 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3209 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3210 			    sizeof(key->enc_ipv4.src)) ||
3211 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3212 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3213 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3214 			     sizeof(key->enc_ipv4.dst))))
3215 		goto nla_put_failure;
3216 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3217 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3218 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3219 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3220 			    sizeof(key->enc_ipv6.src)) ||
3221 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3222 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3223 				 &mask->enc_ipv6.dst,
3224 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3225 			    sizeof(key->enc_ipv6.dst))))
3226 		goto nla_put_failure;
3227 
3228 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3229 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3230 			    sizeof(key->enc_key_id)) ||
3231 	    fl_dump_key_val(skb, &key->enc_tp.src,
3232 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3233 			    &mask->enc_tp.src,
3234 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3235 			    sizeof(key->enc_tp.src)) ||
3236 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3237 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3238 			    &mask->enc_tp.dst,
3239 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3240 			    sizeof(key->enc_tp.dst)) ||
3241 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3242 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3243 		goto nla_put_failure;
3244 
3245 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3246 		goto nla_put_failure;
3247 
3248 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3249 		goto nla_put_failure;
3250 
3251 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3252 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3253 			     sizeof(key->hash.hash)))
3254 		goto nla_put_failure;
3255 
3256 	return 0;
3257 
3258 nla_put_failure:
3259 	return -EMSGSIZE;
3260 }
3261 
3262 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3263 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3264 {
3265 	struct cls_fl_filter *f = fh;
3266 	struct nlattr *nest;
3267 	struct fl_flow_key *key, *mask;
3268 	bool skip_hw;
3269 
3270 	if (!f)
3271 		return skb->len;
3272 
3273 	t->tcm_handle = f->handle;
3274 
3275 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3276 	if (!nest)
3277 		goto nla_put_failure;
3278 
3279 	spin_lock(&tp->lock);
3280 
3281 	if (f->res.classid &&
3282 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3283 		goto nla_put_failure_locked;
3284 
3285 	key = &f->key;
3286 	mask = &f->mask->key;
3287 	skip_hw = tc_skip_hw(f->flags);
3288 
3289 	if (fl_dump_key(skb, net, key, mask))
3290 		goto nla_put_failure_locked;
3291 
3292 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3293 		goto nla_put_failure_locked;
3294 
3295 	spin_unlock(&tp->lock);
3296 
3297 	if (!skip_hw)
3298 		fl_hw_update_stats(tp, f, rtnl_held);
3299 
3300 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3301 		goto nla_put_failure;
3302 
3303 	if (tcf_exts_dump(skb, &f->exts))
3304 		goto nla_put_failure;
3305 
3306 	nla_nest_end(skb, nest);
3307 
3308 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3309 		goto nla_put_failure;
3310 
3311 	return skb->len;
3312 
3313 nla_put_failure_locked:
3314 	spin_unlock(&tp->lock);
3315 nla_put_failure:
3316 	nla_nest_cancel(skb, nest);
3317 	return -1;
3318 }
3319 
3320 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3321 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3322 {
3323 	struct cls_fl_filter *f = fh;
3324 	struct nlattr *nest;
3325 	bool skip_hw;
3326 
3327 	if (!f)
3328 		return skb->len;
3329 
3330 	t->tcm_handle = f->handle;
3331 
3332 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3333 	if (!nest)
3334 		goto nla_put_failure;
3335 
3336 	spin_lock(&tp->lock);
3337 
3338 	skip_hw = tc_skip_hw(f->flags);
3339 
3340 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3341 		goto nla_put_failure_locked;
3342 
3343 	spin_unlock(&tp->lock);
3344 
3345 	if (!skip_hw)
3346 		fl_hw_update_stats(tp, f, rtnl_held);
3347 
3348 	if (tcf_exts_terse_dump(skb, &f->exts))
3349 		goto nla_put_failure;
3350 
3351 	nla_nest_end(skb, nest);
3352 
3353 	return skb->len;
3354 
3355 nla_put_failure_locked:
3356 	spin_unlock(&tp->lock);
3357 nla_put_failure:
3358 	nla_nest_cancel(skb, nest);
3359 	return -1;
3360 }
3361 
3362 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3363 {
3364 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3365 	struct fl_flow_key *key, *mask;
3366 	struct nlattr *nest;
3367 
3368 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3369 	if (!nest)
3370 		goto nla_put_failure;
3371 
3372 	key = &tmplt->dummy_key;
3373 	mask = &tmplt->mask;
3374 
3375 	if (fl_dump_key(skb, net, key, mask))
3376 		goto nla_put_failure;
3377 
3378 	nla_nest_end(skb, nest);
3379 
3380 	return skb->len;
3381 
3382 nla_put_failure:
3383 	nla_nest_cancel(skb, nest);
3384 	return -EMSGSIZE;
3385 }
3386 
3387 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3388 			  unsigned long base)
3389 {
3390 	struct cls_fl_filter *f = fh;
3391 
3392 	if (f && f->res.classid == classid) {
3393 		if (cl)
3394 			__tcf_bind_filter(q, &f->res, base);
3395 		else
3396 			__tcf_unbind_filter(q, &f->res);
3397 	}
3398 }
3399 
3400 static bool fl_delete_empty(struct tcf_proto *tp)
3401 {
3402 	struct cls_fl_head *head = fl_head_dereference(tp);
3403 
3404 	spin_lock(&tp->lock);
3405 	tp->deleting = idr_is_empty(&head->handle_idr);
3406 	spin_unlock(&tp->lock);
3407 
3408 	return tp->deleting;
3409 }
3410 
3411 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3412 	.kind		= "flower",
3413 	.classify	= fl_classify,
3414 	.init		= fl_init,
3415 	.destroy	= fl_destroy,
3416 	.get		= fl_get,
3417 	.put		= fl_put,
3418 	.change		= fl_change,
3419 	.delete		= fl_delete,
3420 	.delete_empty	= fl_delete_empty,
3421 	.walk		= fl_walk,
3422 	.reoffload	= fl_reoffload,
3423 	.hw_add		= fl_hw_add,
3424 	.hw_del		= fl_hw_del,
3425 	.dump		= fl_dump,
3426 	.terse_dump	= fl_terse_dump,
3427 	.bind_class	= fl_bind_class,
3428 	.tmplt_create	= fl_tmplt_create,
3429 	.tmplt_destroy	= fl_tmplt_destroy,
3430 	.tmplt_dump	= fl_tmplt_dump,
3431 	.owner		= THIS_MODULE,
3432 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3433 };
3434 
3435 static int __init cls_fl_init(void)
3436 {
3437 	return register_tcf_proto_ops(&cls_fl_ops);
3438 }
3439 
3440 static void __exit cls_fl_exit(void)
3441 {
3442 	unregister_tcf_proto_ops(&cls_fl_ops);
3443 }
3444 
3445 module_init(cls_fl_init);
3446 module_exit(cls_fl_exit);
3447 
3448 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3449 MODULE_DESCRIPTION("Flower classifier");
3450 MODULE_LICENSE("GPL v2");
3451