xref: /openbmc/linux/net/sched/cls_flower.c (revision 76ce0265)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27 
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30 
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 
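/* Software match key. Every member mirrors a flow_dissector key, and the same
 * layout is reused for the mask (a zero byte means "don't care"), which is
 * why the structure is long-aligned: masking and comparison below can then
 * walk it one long at a time.
 */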
33 struct fl_flow_key {
34 	struct flow_dissector_key_meta meta;
35 	struct flow_dissector_key_control control;
36 	struct flow_dissector_key_control enc_control;
37 	struct flow_dissector_key_basic basic;
38 	struct flow_dissector_key_eth_addrs eth;
39 	struct flow_dissector_key_vlan vlan;
40 	struct flow_dissector_key_vlan cvlan;
41 	union {
42 		struct flow_dissector_key_ipv4_addrs ipv4;
43 		struct flow_dissector_key_ipv6_addrs ipv6;
44 	};
45 	struct flow_dissector_key_ports tp;
46 	struct flow_dissector_key_icmp icmp;
47 	struct flow_dissector_key_arp arp;
48 	struct flow_dissector_key_keyid enc_key_id;
49 	union {
50 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 	};
53 	struct flow_dissector_key_ports enc_tp;
54 	struct flow_dissector_key_mpls mpls;
55 	struct flow_dissector_key_tcp tcp;
56 	struct flow_dissector_key_ip ip;
57 	struct flow_dissector_key_ip enc_ip;
58 	struct flow_dissector_key_enc_opts enc_opts;
59 	union {
60 		struct flow_dissector_key_ports tp;
61 		struct {
62 			struct flow_dissector_key_ports tp_min;
63 			struct flow_dissector_key_ports tp_max;
64 		};
65 	} tp_range;
66 	struct flow_dissector_key_ct ct;
67 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
68 
69 struct fl_flow_mask_range {
70 	unsigned short int start;
71 	unsigned short int end;
72 };
73 
74 struct fl_flow_mask {
75 	struct fl_flow_key key;
76 	struct fl_flow_mask_range range;
77 	u32 flags;
78 	struct rhash_head ht_node;
79 	struct rhashtable ht;
80 	struct rhashtable_params filter_ht_params;
81 	struct flow_dissector dissector;
82 	struct list_head filters;
83 	struct rcu_work rwork;
84 	struct list_head list;
85 	refcount_t refcnt;
86 };
87 
88 struct fl_flow_tmplt {
89 	struct fl_flow_key dummy_key;
90 	struct fl_flow_key mask;
91 	struct flow_dissector dissector;
92 	struct tcf_chain *chain;
93 };
94 
95 struct cls_fl_head {
96 	struct rhashtable ht;
97 	spinlock_t masks_lock; /* Protect masks list */
98 	struct list_head masks;
99 	struct list_head hw_filters;
100 	struct rcu_work rwork;
101 	struct idr handle_idr;
102 };
103 
104 struct cls_fl_filter {
105 	struct fl_flow_mask *mask;
106 	struct rhash_head ht_node;
107 	struct fl_flow_key mkey;
108 	struct tcf_exts exts;
109 	struct tcf_result res;
110 	struct fl_flow_key key;
111 	struct list_head list;
112 	struct list_head hw_list;
113 	u32 handle;
114 	u32 flags;
115 	u32 in_hw_count;
116 	struct rcu_work rwork;
117 	struct net_device *hw_dev;
118 	/* Flower classifier is unlocked, which means that its reference counter
119 	 * can be changed concurrently without any kind of external
120 	 * synchronization. Use atomic reference counter to be concurrency-safe.
121 	 */
122 	refcount_t refcnt;
123 	bool deleted;
124 };
125 
126 static const struct rhashtable_params mask_ht_params = {
127 	.key_offset = offsetof(struct fl_flow_mask, key),
128 	.key_len = sizeof(struct fl_flow_key),
129 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
130 	.automatic_shrinking = true,
131 };
132 
133 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
134 {
135 	return mask->range.end - mask->range.start;
136 }
137 
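/* Compute the byte range [start, end) of the mask that actually contains
 * non-zero bytes, rounded out to long boundaries. Masked copies, compares and
 * hash lookups below only ever touch this span instead of the whole key.
 */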
138 static void fl_mask_update_range(struct fl_flow_mask *mask)
139 {
140 	const u8 *bytes = (const u8 *) &mask->key;
141 	size_t size = sizeof(mask->key);
142 	size_t i, first = 0, last;
143 
144 	for (i = 0; i < size; i++) {
145 		if (bytes[i]) {
146 			first = i;
147 			break;
148 		}
149 	}
150 	last = first;
151 	for (i = size - 1; i != first; i--) {
152 		if (bytes[i]) {
153 			last = i;
154 			break;
155 		}
156 	}
157 	mask->range.start = rounddown(first, sizeof(long));
158 	mask->range.end = roundup(last + 1, sizeof(long));
159 }
160 
161 static void *fl_key_get_start(struct fl_flow_key *key,
162 			      const struct fl_flow_mask *mask)
163 {
164 	return (u8 *) key + mask->range.start;
165 }
166 
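/* Build the masked key used for hashing: mkey = key & mask, computed one long
 * at a time over the range selected by fl_mask_update_range().
 */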
167 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
168 			      struct fl_flow_mask *mask)
169 {
170 	const long *lkey = fl_key_get_start(key, mask);
171 	const long *lmask = fl_key_get_start(&mask->key, mask);
172 	long *lmkey = fl_key_get_start(mkey, mask);
173 	int i;
174 
175 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
176 		*lmkey++ = *lkey++ & *lmask++;
177 }
178 
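/* A mask fits a chain template when it does not match on any bit the template
 * leaves unmasked, i.e. (~tmplt->mask & mask) == 0 over the masked range.
 * A NULL template accepts every mask.
 */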
179 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
180 			       struct fl_flow_mask *mask)
181 {
182 	const long *lmask = fl_key_get_start(&mask->key, mask);
183 	const long *ltmplt;
184 	int i;
185 
186 	if (!tmplt)
187 		return true;
188 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
189 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
190 		if (~*ltmplt++ & *lmask++)
191 			return false;
192 	}
193 	return true;
194 }
195 
196 static void fl_clear_masked_range(struct fl_flow_key *key,
197 				  struct fl_flow_mask *mask)
198 {
199 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
200 }
201 
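/* Port ranges cannot be expressed as a bitmask, so range filters are checked
 * explicitly: compare the packet's port against the filter's [min, max]
 * window and, on a hit, copy the filter's own min/max values into the lookup
 * key so that the following hash lookup can succeed.
 */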
202 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
203 				  struct fl_flow_key *key,
204 				  struct fl_flow_key *mkey)
205 {
206 	__be16 min_mask, max_mask, min_val, max_val;
207 
208 	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
209 	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
210 	min_val = htons(filter->key.tp_range.tp_min.dst);
211 	max_val = htons(filter->key.tp_range.tp_max.dst);
212 
213 	if (min_mask && max_mask) {
214 		if (htons(key->tp_range.tp.dst) < min_val ||
215 		    htons(key->tp_range.tp.dst) > max_val)
216 			return false;
217 
218 		/* skb does not have min and max values */
219 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
220 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
221 	}
222 	return true;
223 }
224 
225 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
226 				  struct fl_flow_key *key,
227 				  struct fl_flow_key *mkey)
228 {
229 	__be16 min_mask, max_mask, min_val, max_val;
230 
231 	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
232 	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
233 	min_val = htons(filter->key.tp_range.tp_min.src);
234 	max_val = htons(filter->key.tp_range.tp_max.src);
235 
236 	if (min_mask && max_mask) {
237 		if (htons(key->tp_range.tp.src) < min_val ||
238 		    htons(key->tp_range.tp.src) > max_val)
239 			return false;
240 
241 		/* skb does not have min and max values */
242 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
243 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
244 	}
245 	return true;
246 }
247 
248 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
249 					 struct fl_flow_key *mkey)
250 {
251 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
252 				      mask->filter_ht_params);
253 }
254 
255 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
256 					     struct fl_flow_key *mkey,
257 					     struct fl_flow_key *key)
258 {
259 	struct cls_fl_filter *filter, *f;
260 
261 	list_for_each_entry_rcu(filter, &mask->filters, list) {
262 		if (!fl_range_port_dst_cmp(filter, key, mkey))
263 			continue;
264 
265 		if (!fl_range_port_src_cmp(filter, key, mkey))
266 			continue;
267 
268 		f = __fl_lookup(mask, mkey);
269 		if (f)
270 			return f;
271 	}
272 	return NULL;
273 }
274 
275 static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
276 				       struct fl_flow_key *mkey,
277 				       struct fl_flow_key *key)
278 {
279 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
280 		return fl_lookup_range(mask, mkey, key);
281 
282 	return __fl_lookup(mask, mkey);
283 }
284 
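/* Map the conntrack state attached to the skb (enum ip_conntrack_info) to the
 * TCA_FLOWER_KEY_CT_FLAGS_* bits that skb_flow_dissect_ct() reports, so that
 * filters can match on conntrack state such as tracked/established/new.
 */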
285 static u16 fl_ct_info_to_flower_map[] = {
286 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
287 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
288 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
289 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
290 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
291 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
292 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
293 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
294 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
296 };
297 
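/* Classification walks every mask installed on this classifier: dissect the
 * skb into skb_key, reduce it to the masked span and do a hash lookup in that
 * mask's filter table. The first software-visible match wins and its actions
 * are executed.
 *
 * For example (userspace view, iproute2 flower syntax), a rule such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * typically results in one fl_flow_mask covering the matched fields and one
 * cls_fl_filter hashed under it.
 */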
298 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
299 		       struct tcf_result *res)
300 {
301 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
302 	struct fl_flow_key skb_mkey;
303 	struct fl_flow_key skb_key;
304 	struct fl_flow_mask *mask;
305 	struct cls_fl_filter *f;
306 
307 	list_for_each_entry_rcu(mask, &head->masks, list) {
308 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
309 		fl_clear_masked_range(&skb_key, mask);
310 
311 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
312 		/* skb_flow_dissect() does not set n_proto in case of an unknown
313 		 * protocol, so set it here instead.
314 		 */
315 		skb_key.basic.n_proto = skb->protocol;
316 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
317 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
318 				    fl_ct_info_to_flower_map,
319 				    ARRAY_SIZE(fl_ct_info_to_flower_map));
320 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
321 
322 		fl_set_masked_key(&skb_mkey, &skb_key, mask);
323 
324 		f = fl_lookup(mask, &skb_mkey, &skb_key);
325 		if (f && !tc_skip_sw(f->flags)) {
326 			*res = f->res;
327 			return tcf_exts_exec(skb, &f->exts, res);
328 		}
329 	}
330 	return -1;
331 }
332 
333 static int fl_init(struct tcf_proto *tp)
334 {
335 	struct cls_fl_head *head;
336 
337 	head = kzalloc(sizeof(*head), GFP_KERNEL);
338 	if (!head)
339 		return -ENOBUFS;
340 
341 	spin_lock_init(&head->masks_lock);
342 	INIT_LIST_HEAD_RCU(&head->masks);
343 	INIT_LIST_HEAD(&head->hw_filters);
344 	rcu_assign_pointer(tp->root, head);
345 	idr_init(&head->handle_idr);
346 
347 	return rhashtable_init(&head->ht, &mask_ht_params);
348 }
349 
350 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
351 {
352 	/* temporary masks don't have their filters list and ht initialized */
353 	if (mask_init_done) {
354 		WARN_ON(!list_empty(&mask->filters));
355 		rhashtable_destroy(&mask->ht);
356 	}
357 	kfree(mask);
358 }
359 
360 static void fl_mask_free_work(struct work_struct *work)
361 {
362 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
363 						 struct fl_flow_mask, rwork);
364 
365 	fl_mask_free(mask, true);
366 }
367 
368 static void fl_uninit_mask_free_work(struct work_struct *work)
369 {
370 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
371 						 struct fl_flow_mask, rwork);
372 
373 	fl_mask_free(mask, false);
374 }
375 
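/* Drop one reference on a mask. The last put unlinks the mask from the mask
 * hashtable and the masks list and frees it from a workqueue once RCU readers
 * are done with it.
 */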
376 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
377 {
378 	if (!refcount_dec_and_test(&mask->refcnt))
379 		return false;
380 
381 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
382 
383 	spin_lock(&head->masks_lock);
384 	list_del_rcu(&mask->list);
385 	spin_unlock(&head->masks_lock);
386 
387 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
388 
389 	return true;
390 }
391 
392 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
393 {
394 	/* Flower classifier only changes its root pointer during init and destroy.
395 	 * Users must obtain a reference to the tcf_proto instance before calling
396 	 * its API, so the tp->root pointer is protected from a concurrent call
397 	 * to fl_destroy() by reference counting.
398 	 */
399 	return rcu_dereference_raw(tp->root);
400 }
401 
402 static void __fl_destroy_filter(struct cls_fl_filter *f)
403 {
404 	tcf_exts_destroy(&f->exts);
405 	tcf_exts_put_net(&f->exts);
406 	kfree(f);
407 }
408 
409 static void fl_destroy_filter_work(struct work_struct *work)
410 {
411 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
412 					struct cls_fl_filter, rwork);
413 
414 	__fl_destroy_filter(f);
415 }
416 
417 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
418 				 bool rtnl_held, struct netlink_ext_ack *extack)
419 {
420 	struct tcf_block *block = tp->chain->block;
421 	struct flow_cls_offload cls_flower = {};
422 
423 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
424 	cls_flower.command = FLOW_CLS_DESTROY;
425 	cls_flower.cookie = (unsigned long) f;
426 
427 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
428 			    &f->flags, &f->in_hw_count, rtnl_held);
429 
430 }
431 
432 static int fl_hw_replace_filter(struct tcf_proto *tp,
433 				struct cls_fl_filter *f, bool rtnl_held,
434 				struct netlink_ext_ack *extack)
435 {
436 	struct tcf_block *block = tp->chain->block;
437 	struct flow_cls_offload cls_flower = {};
438 	bool skip_sw = tc_skip_sw(f->flags);
439 	int err = 0;
440 
441 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
442 	if (!cls_flower.rule)
443 		return -ENOMEM;
444 
445 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
446 	cls_flower.command = FLOW_CLS_REPLACE;
447 	cls_flower.cookie = (unsigned long) f;
448 	cls_flower.rule->match.dissector = &f->mask->dissector;
449 	cls_flower.rule->match.mask = &f->mask->key;
450 	cls_flower.rule->match.key = &f->mkey;
451 	cls_flower.classid = f->res.classid;
452 
453 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
454 				   rtnl_held);
455 	if (err) {
456 		kfree(cls_flower.rule);
457 		if (skip_sw) {
458 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
459 			return err;
460 		}
461 		return 0;
462 	}
463 
464 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
465 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
466 	tc_cleanup_flow_action(&cls_flower.rule->action);
467 	kfree(cls_flower.rule);
468 
469 	if (err) {
470 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
471 		return err;
472 	}
473 
474 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
475 		return -EINVAL;
476 
477 	return 0;
478 }
479 
480 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
481 			       bool rtnl_held)
482 {
483 	struct tcf_block *block = tp->chain->block;
484 	struct flow_cls_offload cls_flower = {};
485 
486 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
487 	cls_flower.command = FLOW_CLS_STATS;
488 	cls_flower.cookie = (unsigned long) f;
489 	cls_flower.classid = f->res.classid;
490 
491 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
492 			 rtnl_held);
493 
494 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
495 			      cls_flower.stats.pkts,
496 			      cls_flower.stats.lastused);
497 }
498 
499 static void __fl_put(struct cls_fl_filter *f)
500 {
501 	if (!refcount_dec_and_test(&f->refcnt))
502 		return;
503 
504 	if (tcf_exts_get_net(&f->exts))
505 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
506 	else
507 		__fl_destroy_filter(f);
508 }
509 
510 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
511 {
512 	struct cls_fl_filter *f;
513 
514 	rcu_read_lock();
515 	f = idr_find(&head->handle_idr, handle);
516 	if (f && !refcount_inc_not_zero(&f->refcnt))
517 		f = NULL;
518 	rcu_read_unlock();
519 
520 	return f;
521 }
522 
523 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
524 		       bool *last, bool rtnl_held,
525 		       struct netlink_ext_ack *extack)
526 {
527 	struct cls_fl_head *head = fl_head_dereference(tp);
528 
529 	*last = false;
530 
531 	spin_lock(&tp->lock);
532 	if (f->deleted) {
533 		spin_unlock(&tp->lock);
534 		return -ENOENT;
535 	}
536 
537 	f->deleted = true;
538 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
539 			       f->mask->filter_ht_params);
540 	idr_remove(&head->handle_idr, f->handle);
541 	list_del_rcu(&f->list);
542 	spin_unlock(&tp->lock);
543 
544 	*last = fl_mask_put(head, f->mask);
545 	if (!tc_skip_hw(f->flags))
546 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
547 	tcf_unbind_filter(tp, &f->res);
548 	__fl_put(f);
549 
550 	return 0;
551 }
552 
553 static void fl_destroy_sleepable(struct work_struct *work)
554 {
555 	struct cls_fl_head *head = container_of(to_rcu_work(work),
556 						struct cls_fl_head,
557 						rwork);
558 
559 	rhashtable_destroy(&head->ht);
560 	kfree(head);
561 	module_put(THIS_MODULE);
562 }
563 
564 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
565 		       struct netlink_ext_ack *extack)
566 {
567 	struct cls_fl_head *head = fl_head_dereference(tp);
568 	struct fl_flow_mask *mask, *next_mask;
569 	struct cls_fl_filter *f, *next;
570 	bool last;
571 
572 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
573 		list_for_each_entry_safe(f, next, &mask->filters, list) {
574 			__fl_delete(tp, f, &last, rtnl_held, extack);
575 			if (last)
576 				break;
577 		}
578 	}
579 	idr_destroy(&head->handle_idr);
580 
581 	__module_get(THIS_MODULE);
582 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
583 }
584 
585 static void fl_put(struct tcf_proto *tp, void *arg)
586 {
587 	struct cls_fl_filter *f = arg;
588 
589 	__fl_put(f);
590 }
591 
592 static void *fl_get(struct tcf_proto *tp, u32 handle)
593 {
594 	struct cls_fl_head *head = fl_head_dereference(tp);
595 
596 	return __fl_get(head, handle);
597 }
598 
599 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
600 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
601 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
602 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
603 					    .len = IFNAMSIZ },
604 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
605 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
606 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
607 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
608 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
609 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
610 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
611 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
612 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
613 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
614 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
615 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
616 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
617 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
618 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
619 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
620 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
621 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
622 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
623 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
624 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
625 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
628 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
629 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
630 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
632 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
633 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
634 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
639 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
641 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
642 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
643 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
644 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
645 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
646 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
647 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
648 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
649 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
650 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
651 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
652 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
653 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
654 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
655 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
656 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
657 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
658 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
659 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
660 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
661 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
662 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
663 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
664 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
665 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
666 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
670 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
671 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
672 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
673 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
674 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
677 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
679 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
681 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
684 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
685 	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
687 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
688 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
689 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
690 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
691 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
692 					    .len = 128 / BITS_PER_BYTE },
693 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
694 					    .len = 128 / BITS_PER_BYTE },
695 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
696 };
697 
698 static const struct nla_policy
699 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
700 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
701 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
702 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
703 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
704 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
705 };
706 
707 static const struct nla_policy
708 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
709 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
710 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
711 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
712 						       .len = 128 },
713 };
714 
715 static const struct nla_policy
716 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
717 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
718 };
719 
720 static const struct nla_policy
721 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
722 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
723 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
724 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
725 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
726 };
727 
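/* Copy one netlink attribute into the key and its mask. If the mask attribute
 * is absent (or the field has no mask attribute at all), an exact match on
 * the whole field is assumed, i.e. the mask is set to all ones.
 */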
728 static void fl_set_key_val(struct nlattr **tb,
729 			   void *val, int val_type,
730 			   void *mask, int mask_type, int len)
731 {
732 	if (!tb[val_type])
733 		return;
734 	nla_memcpy(val, tb[val_type], len);
735 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
736 		memset(mask, 0xff, len);
737 	else
738 		nla_memcpy(mask, tb[mask_type], len);
739 }
740 
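/* Parse the optional destination/source port range attributes. A range is
 * only accepted if the maximum port is greater than the minimum; otherwise
 * the filter is rejected.
 */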
741 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
742 				 struct fl_flow_key *mask)
743 {
744 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
745 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
746 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
747 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
748 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
749 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
750 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
751 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
752 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
753 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
754 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
755 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
756 
757 	if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
758 	     htons(key->tp_range.tp_max.dst) <=
759 		 htons(key->tp_range.tp_min.dst)) ||
760 	    (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
761 	     htons(key->tp_range.tp_max.src) <=
762 		 htons(key->tp_range.tp_min.src)))
763 		return -EINVAL;
764 
765 	return 0;
766 }
767 
768 static int fl_set_key_mpls(struct nlattr **tb,
769 			   struct flow_dissector_key_mpls *key_val,
770 			   struct flow_dissector_key_mpls *key_mask)
771 {
772 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
773 		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
774 		key_mask->mpls_ttl = MPLS_TTL_MASK;
775 	}
776 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
777 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
778 
779 		if (bos & ~MPLS_BOS_MASK)
780 			return -EINVAL;
781 		key_val->mpls_bos = bos;
782 		key_mask->mpls_bos = MPLS_BOS_MASK;
783 	}
784 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
785 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
786 
787 		if (tc & ~MPLS_TC_MASK)
788 			return -EINVAL;
789 		key_val->mpls_tc = tc;
790 		key_mask->mpls_tc = MPLS_TC_MASK;
791 	}
792 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
793 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
794 
795 		if (label & ~MPLS_LABEL_MASK)
796 			return -EINVAL;
797 		key_val->mpls_label = label;
798 		key_mask->mpls_label = MPLS_LABEL_MASK;
799 	}
800 	return 0;
801 }
802 
803 static void fl_set_key_vlan(struct nlattr **tb,
804 			    __be16 ethertype,
805 			    int vlan_id_key, int vlan_prio_key,
806 			    struct flow_dissector_key_vlan *key_val,
807 			    struct flow_dissector_key_vlan *key_mask)
808 {
809 #define VLAN_PRIORITY_MASK	0x7
810 
811 	if (tb[vlan_id_key]) {
812 		key_val->vlan_id =
813 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
814 		key_mask->vlan_id = VLAN_VID_MASK;
815 	}
816 	if (tb[vlan_prio_key]) {
817 		key_val->vlan_priority =
818 			nla_get_u8(tb[vlan_prio_key]) &
819 			VLAN_PRIORITY_MASK;
820 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
821 	}
822 	key_val->vlan_tpid = ethertype;
823 	key_mask->vlan_tpid = cpu_to_be16(~0);
824 }
825 
826 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
827 			    u32 *dissector_key, u32 *dissector_mask,
828 			    u32 flower_flag_bit, u32 dissector_flag_bit)
829 {
830 	if (flower_mask & flower_flag_bit) {
831 		*dissector_mask |= dissector_flag_bit;
832 		if (flower_key & flower_flag_bit)
833 			*dissector_key |= dissector_flag_bit;
834 	}
835 }
836 
837 static int fl_set_key_flags(struct nlattr **tb,
838 			    u32 *flags_key, u32 *flags_mask)
839 {
840 	u32 key, mask;
841 
842 	/* mask is mandatory for flags */
843 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
844 		return -EINVAL;
845 
846 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
847 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
848 
849 	*flags_key  = 0;
850 	*flags_mask = 0;
851 
852 	fl_set_key_flag(key, mask, flags_key, flags_mask,
853 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
854 	fl_set_key_flag(key, mask, flags_key, flags_mask,
855 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
856 			FLOW_DIS_FIRST_FRAG);
857 
858 	return 0;
859 }
860 
861 static void fl_set_key_ip(struct nlattr **tb, bool encap,
862 			  struct flow_dissector_key_ip *key,
863 			  struct flow_dissector_key_ip *mask)
864 {
865 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
866 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
867 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
868 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
869 
870 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
871 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
872 }
873 
874 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
875 			     int depth, int option_len,
876 			     struct netlink_ext_ack *extack)
877 {
878 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
879 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
880 	struct geneve_opt *opt;
881 	int err, data_len = 0;
882 
883 	if (option_len > sizeof(struct geneve_opt))
884 		data_len = option_len - sizeof(struct geneve_opt);
885 
886 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
887 	memset(opt, 0xff, option_len);
888 	opt->length = data_len / 4;
889 	opt->r1 = 0;
890 	opt->r2 = 0;
891 	opt->r3 = 0;
892 
893 	/* If no mask has been provided we assume an exact match. */
894 	if (!depth)
895 		return sizeof(struct geneve_opt) + data_len;
896 
897 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
898 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
899 		return -EINVAL;
900 	}
901 
902 	err = nla_parse_nested_deprecated(tb,
903 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
904 					  nla, geneve_opt_policy, extack);
905 	if (err < 0)
906 		return err;
907 
908 	/* We are not allowed to omit any of CLASS, TYPE or DATA
909 	 * fields from the key.
910 	 */
911 	if (!option_len &&
912 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
913 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
914 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
915 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
916 		return -EINVAL;
917 	}
918 
919 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
920 	 * for the mask.
921 	 */
922 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
923 		int new_len = key->enc_opts.len;
924 
925 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
926 		data_len = nla_len(data);
927 		if (data_len < 4) {
928 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
929 			return -ERANGE;
930 		}
931 		if (data_len % 4) {
932 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes");
933 			return -ERANGE;
934 		}
935 
936 		new_len += sizeof(struct geneve_opt) + data_len;
937 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
938 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
939 			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
940 			return -ERANGE;
941 		}
942 		opt->length = data_len / 4;
943 		memcpy(opt->opt_data, nla_data(data), data_len);
944 	}
945 
946 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
947 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
948 		opt->opt_class = nla_get_be16(class);
949 	}
950 
951 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
952 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
953 		opt->type = nla_get_u8(type);
954 	}
955 
956 	return sizeof(struct geneve_opt) + data_len;
957 }
958 
959 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
960 			    int depth, int option_len,
961 			    struct netlink_ext_ack *extack)
962 {
963 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
964 	struct vxlan_metadata *md;
965 	int err;
966 
967 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
968 	memset(md, 0xff, sizeof(*md));
969 
970 	if (!depth)
971 		return sizeof(*md);
972 
973 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
974 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
975 		return -EINVAL;
976 	}
977 
978 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
979 			       vxlan_opt_policy, extack);
980 	if (err < 0)
981 		return err;
982 
983 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
984 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
985 		return -EINVAL;
986 	}
987 
988 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
989 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
990 
991 	return sizeof(*md);
992 }
993 
994 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
995 			     int depth, int option_len,
996 			     struct netlink_ext_ack *extack)
997 {
998 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
999 	struct erspan_metadata *md;
1000 	int err;
1001 
1002 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1003 	memset(md, 0xff, sizeof(*md));
1004 	md->version = 1;
1005 
1006 	if (!depth)
1007 		return sizeof(*md);
1008 
1009 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1010 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1011 		return -EINVAL;
1012 	}
1013 
1014 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1015 			       erspan_opt_policy, extack);
1016 	if (err < 0)
1017 		return err;
1018 
1019 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1020 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1021 		return -EINVAL;
1022 	}
1023 
1024 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1025 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1026 
1027 	if (md->version == 1) {
1028 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1029 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1030 			return -EINVAL;
1031 		}
1032 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1033 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1034 			md->u.index = nla_get_be32(nla);
1035 		}
1036 	} else if (md->version == 2) {
1037 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1038 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1039 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1040 			return -EINVAL;
1041 		}
1042 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1043 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1044 			md->u.md2.dir = nla_get_u8(nla);
1045 		}
1046 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1047 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1048 			set_hwid(&md->u.md2, nla_get_u8(nla));
1049 		}
1050 	} else {
1051 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1052 		return -EINVAL;
1053 	}
1054 
1055 	return sizeof(*md);
1056 }
1057 
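/* Parse tunnel (encap) option key and mask attributes. Key and mask options
 * are walked in lockstep: the option types must agree and the accumulated key
 * and mask lengths must stay equal, otherwise the filter is rejected.
 */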
1058 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1059 			  struct fl_flow_key *mask,
1060 			  struct netlink_ext_ack *extack)
1061 {
1062 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1063 	int err, option_len, key_depth, msk_depth = 0;
1064 
1065 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1066 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1067 					     enc_opts_policy, extack);
1068 	if (err)
1069 		return err;
1070 
1071 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1072 
1073 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1074 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1075 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1076 						     enc_opts_policy, extack);
1077 		if (err)
1078 			return err;
1079 
1080 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1081 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1082 	}
1083 
1084 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1085 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1086 		switch (nla_type(nla_opt_key)) {
1087 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1088 			if (key->enc_opts.dst_opt_type &&
1089 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1090 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1091 				return -EINVAL;
1092 			}
1093 			option_len = 0;
1094 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1095 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1096 						       key_depth, option_len,
1097 						       extack);
1098 			if (option_len < 0)
1099 				return option_len;
1100 
1101 			key->enc_opts.len += option_len;
1102 			/* At the same time we need to parse through the mask to
1103 			 * verify that the key and mask attribute lengths match.
1104 			 */
1105 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1106 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1107 						       msk_depth, option_len,
1108 						       extack);
1109 			if (option_len < 0)
1110 				return option_len;
1111 
1112 			mask->enc_opts.len += option_len;
1113 			if (key->enc_opts.len != mask->enc_opts.len) {
1114 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1115 				return -EINVAL;
1116 			}
1117 
1118 			if (msk_depth)
1119 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1120 			break;
1121 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1122 			if (key->enc_opts.dst_opt_type) {
1123 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1124 				return -EINVAL;
1125 			}
1126 			option_len = 0;
1127 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1128 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1129 						      key_depth, option_len,
1130 						      extack);
1131 			if (option_len < 0)
1132 				return option_len;
1133 
1134 			key->enc_opts.len += option_len;
1135 			/* At the same time we need to parse through the mask to
1136 			 * verify that the key and mask attribute lengths match.
1137 			 */
1138 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1139 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1140 						      msk_depth, option_len,
1141 						      extack);
1142 			if (option_len < 0)
1143 				return option_len;
1144 
1145 			mask->enc_opts.len += option_len;
1146 			if (key->enc_opts.len != mask->enc_opts.len) {
1147 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1148 				return -EINVAL;
1149 			}
1150 
1151 			if (msk_depth)
1152 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1153 			break;
1154 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1155 			if (key->enc_opts.dst_opt_type) {
1156 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1157 				return -EINVAL;
1158 			}
1159 			option_len = 0;
1160 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1161 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1162 						       key_depth, option_len,
1163 						       extack);
1164 			if (option_len < 0)
1165 				return option_len;
1166 
1167 			key->enc_opts.len += option_len;
1168 			/* At the same time we need to parse through the mask to
1169 			 * verify that the key and mask attribute lengths match.
1170 			 */
1171 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1172 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1173 						       msk_depth, option_len,
1174 						       extack);
1175 			if (option_len < 0)
1176 				return option_len;
1177 
1178 			mask->enc_opts.len += option_len;
1179 			if (key->enc_opts.len != mask->enc_opts.len) {
1180 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
1181 				return -EINVAL;
1182 			}
1183 
1184 			if (msk_depth)
1185 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1186 			break;
1187 		default:
1188 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1189 			return -EINVAL;
1190 		}
1191 	}
1192 
1193 	return 0;
1194 }
1195 
1196 static int fl_set_key_ct(struct nlattr **tb,
1197 			 struct flow_dissector_key_ct *key,
1198 			 struct flow_dissector_key_ct *mask,
1199 			 struct netlink_ext_ack *extack)
1200 {
1201 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1202 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1203 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1204 			return -EOPNOTSUPP;
1205 		}
1206 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1207 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1208 			       sizeof(key->ct_state));
1209 	}
1210 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1211 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1212 			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1213 			return -EOPNOTSUPP;
1214 		}
1215 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1216 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1217 			       sizeof(key->ct_zone));
1218 	}
1219 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1220 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1221 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1222 			return -EOPNOTSUPP;
1223 		}
1224 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1225 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1226 			       sizeof(key->ct_mark));
1227 	}
1228 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1229 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1230 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1231 			return -EOPNOTSUPP;
1232 		}
1233 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1234 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1235 			       sizeof(key->ct_labels));
1236 	}
1237 
1238 	return 0;
1239 }
1240 
1241 static int fl_set_key(struct net *net, struct nlattr **tb,
1242 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1243 		      struct netlink_ext_ack *extack)
1244 {
1245 	__be16 ethertype;
1246 	int ret = 0;
1247 
1248 	if (tb[TCA_FLOWER_INDEV]) {
1249 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1250 		if (err < 0)
1251 			return err;
1252 		key->meta.ingress_ifindex = err;
1253 		mask->meta.ingress_ifindex = 0xffffffff;
1254 	}
1255 
1256 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1257 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1258 		       sizeof(key->eth.dst));
1259 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1260 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1261 		       sizeof(key->eth.src));
1262 
1263 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1264 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1265 
1266 		if (eth_type_vlan(ethertype)) {
1267 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1268 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1269 					&mask->vlan);
1270 
1271 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1272 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1273 				if (eth_type_vlan(ethertype)) {
1274 					fl_set_key_vlan(tb, ethertype,
1275 							TCA_FLOWER_KEY_CVLAN_ID,
1276 							TCA_FLOWER_KEY_CVLAN_PRIO,
1277 							&key->cvlan, &mask->cvlan);
1278 					fl_set_key_val(tb, &key->basic.n_proto,
1279 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1280 						       &mask->basic.n_proto,
1281 						       TCA_FLOWER_UNSPEC,
1282 						       sizeof(key->basic.n_proto));
1283 				} else {
1284 					key->basic.n_proto = ethertype;
1285 					mask->basic.n_proto = cpu_to_be16(~0);
1286 				}
1287 			}
1288 		} else {
1289 			key->basic.n_proto = ethertype;
1290 			mask->basic.n_proto = cpu_to_be16(~0);
1291 		}
1292 	}
1293 
1294 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1295 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1296 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1297 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1298 			       sizeof(key->basic.ip_proto));
1299 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1300 	}
1301 
1302 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1303 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1304 		mask->control.addr_type = ~0;
1305 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1306 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1307 			       sizeof(key->ipv4.src));
1308 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1309 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1310 			       sizeof(key->ipv4.dst));
1311 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1312 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1313 		mask->control.addr_type = ~0;
1314 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1315 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1316 			       sizeof(key->ipv6.src));
1317 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1318 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1319 			       sizeof(key->ipv6.dst));
1320 	}
1321 
1322 	if (key->basic.ip_proto == IPPROTO_TCP) {
1323 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1324 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1325 			       sizeof(key->tp.src));
1326 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1327 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1328 			       sizeof(key->tp.dst));
1329 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1330 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1331 			       sizeof(key->tcp.flags));
1332 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1333 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1334 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1335 			       sizeof(key->tp.src));
1336 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1337 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1338 			       sizeof(key->tp.dst));
1339 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1340 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1341 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1342 			       sizeof(key->tp.src));
1343 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1344 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1345 			       sizeof(key->tp.dst));
1346 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1347 		   key->basic.ip_proto == IPPROTO_ICMP) {
1348 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1349 			       &mask->icmp.type,
1350 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1351 			       sizeof(key->icmp.type));
1352 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1353 			       &mask->icmp.code,
1354 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1355 			       sizeof(key->icmp.code));
1356 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1357 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1358 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1359 			       &mask->icmp.type,
1360 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1361 			       sizeof(key->icmp.type));
1362 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1363 			       &mask->icmp.code,
1364 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1365 			       sizeof(key->icmp.code));
1366 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1367 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1368 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1369 		if (ret)
1370 			return ret;
1371 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1372 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1373 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1374 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1375 			       sizeof(key->arp.sip));
1376 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1377 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1378 			       sizeof(key->arp.tip));
1379 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1380 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1381 			       sizeof(key->arp.op));
1382 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1383 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1384 			       sizeof(key->arp.sha));
1385 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1386 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1387 			       sizeof(key->arp.tha));
1388 	}
1389 
1390 	if (key->basic.ip_proto == IPPROTO_TCP ||
1391 	    key->basic.ip_proto == IPPROTO_UDP ||
1392 	    key->basic.ip_proto == IPPROTO_SCTP) {
1393 		ret = fl_set_key_port_range(tb, key, mask);
1394 		if (ret)
1395 			return ret;
1396 	}
1397 
1398 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1399 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1400 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1401 		mask->enc_control.addr_type = ~0;
1402 		fl_set_key_val(tb, &key->enc_ipv4.src,
1403 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1404 			       &mask->enc_ipv4.src,
1405 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1406 			       sizeof(key->enc_ipv4.src));
1407 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1408 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1409 			       &mask->enc_ipv4.dst,
1410 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1411 			       sizeof(key->enc_ipv4.dst));
1412 	}
1413 
1414 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1415 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1416 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1417 		mask->enc_control.addr_type = ~0;
1418 		fl_set_key_val(tb, &key->enc_ipv6.src,
1419 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1420 			       &mask->enc_ipv6.src,
1421 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1422 			       sizeof(key->enc_ipv6.src));
1423 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1424 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1425 			       &mask->enc_ipv6.dst,
1426 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1427 			       sizeof(key->enc_ipv6.dst));
1428 	}
1429 
1430 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1431 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1432 		       sizeof(key->enc_key_id.keyid));
1433 
1434 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1435 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1436 		       sizeof(key->enc_tp.src));
1437 
1438 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1439 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1440 		       sizeof(key->enc_tp.dst));
1441 
1442 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1443 
1444 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1445 		ret = fl_set_enc_opt(tb, key, mask, extack);
1446 		if (ret)
1447 			return ret;
1448 	}
1449 
1450 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1451 	if (ret)
1452 		return ret;
1453 
1454 	if (tb[TCA_FLOWER_KEY_FLAGS])
1455 		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1456 
1457 	return ret;
1458 }
1459 
1460 static void fl_mask_copy(struct fl_flow_mask *dst,
1461 			 struct fl_flow_mask *src)
1462 {
1463 	const void *psrc = fl_key_get_start(&src->key, src);
1464 	void *pdst = fl_key_get_start(&dst->key, src);
1465 
1466 	memcpy(pdst, psrc, fl_mask_range(src));
1467 	dst->range = src->range;
1468 }
1469 
1470 static const struct rhashtable_params fl_ht_params = {
1471 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1472 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1473 	.automatic_shrinking = true,
1474 };
1475 
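/* Each mask owns a hashtable of its filters. The table hashes only the bytes
 * of cls_fl_filter->mkey that the mask actually covers: key_offset is shifted
 * by range.start and key_len is the length of the masked range.
 */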
1476 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1477 {
1478 	mask->filter_ht_params = fl_ht_params;
1479 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1480 	mask->filter_ht_params.key_offset += mask->range.start;
1481 
1482 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1483 }
1484 
1485 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1486 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1487 
1488 #define FL_KEY_IS_MASKED(mask, member)						\
1489 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1490 		   0, FL_KEY_MEMBER_SIZE(member))				\
1491 
1492 #define FL_KEY_SET(keys, cnt, id, member)					\
1493 	do {									\
1494 		keys[cnt].key_id = id;						\
1495 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1496 		cnt++;								\
1497 	} while(0);
1498 
1499 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1500 	do {									\
1501 		if (FL_KEY_IS_MASKED(mask, member))				\
1502 			FL_KEY_SET(keys, cnt, id, member);			\
1503 	} while(0);
1504 
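/* Build the per-mask flow_dissector: CONTROL and BASIC are always dissected;
 * the remaining keys are pulled in only when their mask contains at least one
 * non-zero byte (ENC_CONTROL keys off the tunnel address masks), so the
 * dissector does no more work than the mask requires.
 */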
1505 static void fl_init_dissector(struct flow_dissector *dissector,
1506 			      struct fl_flow_key *mask)
1507 {
1508 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1509 	size_t cnt = 0;
1510 
1511 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1512 			     FLOW_DISSECTOR_KEY_META, meta);
1513 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1514 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1515 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1516 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1517 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1518 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1519 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1520 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1521 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1522 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1523 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1524 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1525 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1526 			     FLOW_DISSECTOR_KEY_IP, ip);
1527 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1528 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1529 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1530 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1531 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1532 			     FLOW_DISSECTOR_KEY_ARP, arp);
1533 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1534 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1535 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1536 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1537 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1538 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1539 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1540 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1541 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1542 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1543 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1544 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1545 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1546 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1547 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1548 			   enc_control);
1549 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1550 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1551 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1552 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1553 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1554 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1555 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1556 			     FLOW_DISSECTOR_KEY_CT, ct);
1557 
1558 	skb_flow_dissector_init(dissector, keys, cnt);
1559 }
1560 
1561 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1562 					       struct fl_flow_mask *mask)
1563 {
1564 	struct fl_flow_mask *newmask;
1565 	int err;
1566 
1567 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1568 	if (!newmask)
1569 		return ERR_PTR(-ENOMEM);
1570 
1571 	fl_mask_copy(newmask, mask);
1572 
1573 	if ((newmask->key.tp_range.tp_min.dst &&
1574 	     newmask->key.tp_range.tp_max.dst) ||
1575 	    (newmask->key.tp_range.tp_min.src &&
1576 	     newmask->key.tp_range.tp_max.src))
1577 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1578 
1579 	err = fl_init_mask_hashtable(newmask);
1580 	if (err)
1581 		goto errout_free;
1582 
1583 	fl_init_dissector(&newmask->dissector, &newmask->key);
1584 
1585 	INIT_LIST_HEAD_RCU(&newmask->filters);
1586 
1587 	refcount_set(&newmask->refcnt, 1);
1588 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1589 				      &newmask->ht_node, mask_ht_params);
1590 	if (err)
1591 		goto errout_destroy;
1592 
1593 	spin_lock(&head->masks_lock);
1594 	list_add_tail_rcu(&newmask->list, &head->masks);
1595 	spin_unlock(&head->masks_lock);
1596 
1597 	return newmask;
1598 
1599 errout_destroy:
1600 	rhashtable_destroy(&newmask->ht);
1601 errout_free:
1602 	kfree(newmask);
1603 
1604 	return ERR_PTR(err);
1605 }
1606 
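/* Look up an existing mask identical to @mask or create a new one, taking a
 * reference for @fnew. Returns -EAGAIN if the matching mask is concurrently
 * being deleted, and -EINVAL if overwriting @fold would change its mask.
 */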
1607 static int fl_check_assign_mask(struct cls_fl_head *head,
1608 				struct cls_fl_filter *fnew,
1609 				struct cls_fl_filter *fold,
1610 				struct fl_flow_mask *mask)
1611 {
1612 	struct fl_flow_mask *newmask;
1613 	int ret = 0;
1614 
1615 	rcu_read_lock();
1616 
1617 	/* Insert mask as temporary node to prevent concurrent creation of mask
1618 	 * with same key. Any concurrent lookups with same key will return
1619 	 * -EAGAIN because mask's refcnt is zero.
1620 	 */
1621 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1622 						       &mask->ht_node,
1623 						       mask_ht_params);
1624 	if (!fnew->mask) {
1625 		rcu_read_unlock();
1626 
1627 		if (fold) {
1628 			ret = -EINVAL;
1629 			goto errout_cleanup;
1630 		}
1631 
1632 		newmask = fl_create_new_mask(head, mask);
1633 		if (IS_ERR(newmask)) {
1634 			ret = PTR_ERR(newmask);
1635 			goto errout_cleanup;
1636 		}
1637 
1638 		fnew->mask = newmask;
1639 		return 0;
1640 	} else if (IS_ERR(fnew->mask)) {
1641 		ret = PTR_ERR(fnew->mask);
1642 	} else if (fold && fold->mask != fnew->mask) {
1643 		ret = -EINVAL;
1644 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1645 		/* Mask was deleted concurrently, try again */
1646 		ret = -EAGAIN;
1647 	}
1648 	rcu_read_unlock();
1649 	return ret;
1650 
1651 errout_cleanup:
1652 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1653 			       mask_ht_params);
1654 	return ret;
1655 }
1656 
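/* Validate the filter's actions and parse the netlink key/mask attributes
 * into @f and @mask, bind to the class given by TCA_FLOWER_CLASSID, and
 * reject masks that do not fit the chain template.
 */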
1657 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1658 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1659 			unsigned long base, struct nlattr **tb,
1660 			struct nlattr *est, bool ovr,
1661 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1662 			struct netlink_ext_ack *extack)
1663 {
1664 	int err;
1665 
1666 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1667 				extack);
1668 	if (err < 0)
1669 		return err;
1670 
1671 	if (tb[TCA_FLOWER_CLASSID]) {
1672 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1673 		if (!rtnl_held)
1674 			rtnl_lock();
1675 		tcf_bind_filter(tp, &f->res, base);
1676 		if (!rtnl_held)
1677 			rtnl_unlock();
1678 	}
1679 
1680 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1681 	if (err)
1682 		return err;
1683 
1684 	fl_mask_update_range(mask);
1685 	fl_set_masked_key(&f->mkey, &f->key, mask);
1686 
1687 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1688 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1689 		return -EINVAL;
1690 	}
1691 
1692 	return 0;
1693 }
1694 
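/* Insert the new filter into its mask's hashtable. A duplicate key is only
 * tolerated when an existing filter (@fold) is being overwritten; *in_ht
 * tells the caller whether the node actually went into the table.
 */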
1695 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1696 			       struct cls_fl_filter *fold,
1697 			       bool *in_ht)
1698 {
1699 	struct fl_flow_mask *mask = fnew->mask;
1700 	int err;
1701 
1702 	err = rhashtable_lookup_insert_fast(&mask->ht,
1703 					    &fnew->ht_node,
1704 					    mask->filter_ht_params);
1705 	if (err) {
1706 		*in_ht = false;
1707 		/* It is okay if filter with same key exists when
1708 		 * overwriting.
1709 		 */
1710 		return fold && err == -EEXIST ? 0 : err;
1711 	}
1712 
1713 	*in_ht = true;
1714 	return 0;
1715 }
1716 
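/* Create a new flower filter or overwrite an existing one, e.g. for a rule
 * added with something like "tc filter add ... flower dst_ip 10.0.0.1/32
 * action drop" (the command is only illustrative). The classifier can run
 * without rtnl, so all classifier state is updated under tp->lock and
 * concurrent deletions are reported as -EAGAIN for cls_api to retry.
 */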
1717 static int fl_change(struct net *net, struct sk_buff *in_skb,
1718 		     struct tcf_proto *tp, unsigned long base,
1719 		     u32 handle, struct nlattr **tca,
1720 		     void **arg, bool ovr, bool rtnl_held,
1721 		     struct netlink_ext_ack *extack)
1722 {
1723 	struct cls_fl_head *head = fl_head_dereference(tp);
1724 	struct cls_fl_filter *fold = *arg;
1725 	struct cls_fl_filter *fnew;
1726 	struct fl_flow_mask *mask;
1727 	struct nlattr **tb;
1728 	bool in_ht;
1729 	int err;
1730 
1731 	if (!tca[TCA_OPTIONS]) {
1732 		err = -EINVAL;
1733 		goto errout_fold;
1734 	}
1735 
1736 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1737 	if (!mask) {
1738 		err = -ENOBUFS;
1739 		goto errout_fold;
1740 	}
1741 
1742 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1743 	if (!tb) {
1744 		err = -ENOBUFS;
1745 		goto errout_mask_alloc;
1746 	}
1747 
1748 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1749 					  tca[TCA_OPTIONS], fl_policy, NULL);
1750 	if (err < 0)
1751 		goto errout_tb;
1752 
1753 	if (fold && handle && fold->handle != handle) {
1754 		err = -EINVAL;
1755 		goto errout_tb;
1756 	}
1757 
1758 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1759 	if (!fnew) {
1760 		err = -ENOBUFS;
1761 		goto errout_tb;
1762 	}
1763 	INIT_LIST_HEAD(&fnew->hw_list);
1764 	refcount_set(&fnew->refcnt, 1);
1765 
1766 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1767 	if (err < 0)
1768 		goto errout;
1769 
1770 	if (tb[TCA_FLOWER_FLAGS]) {
1771 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1772 
1773 		if (!tc_flags_valid(fnew->flags)) {
1774 			err = -EINVAL;
1775 			goto errout;
1776 		}
1777 	}
1778 
1779 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1780 			   tp->chain->tmplt_priv, rtnl_held, extack);
1781 	if (err)
1782 		goto errout;
1783 
1784 	err = fl_check_assign_mask(head, fnew, fold, mask);
1785 	if (err)
1786 		goto errout;
1787 
1788 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1789 	if (err)
1790 		goto errout_mask;
1791 
1792 	if (!tc_skip_hw(fnew->flags)) {
1793 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1794 		if (err)
1795 			goto errout_ht;
1796 	}
1797 
1798 	if (!tc_in_hw(fnew->flags))
1799 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1800 
1801 	spin_lock(&tp->lock);
1802 
1803 	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
1804 	 * proto again or create new one, if necessary.
1805 	 */
1806 	if (tp->deleting) {
1807 		err = -EAGAIN;
1808 		goto errout_hw;
1809 	}
1810 
1811 	if (fold) {
1812 		/* Fold filter was deleted concurrently. Retry lookup. */
1813 		if (fold->deleted) {
1814 			err = -EAGAIN;
1815 			goto errout_hw;
1816 		}
1817 
1818 		fnew->handle = handle;
1819 
1820 		if (!in_ht) {
1821 			struct rhashtable_params params =
1822 				fnew->mask->filter_ht_params;
1823 
1824 			err = rhashtable_insert_fast(&fnew->mask->ht,
1825 						     &fnew->ht_node,
1826 						     params);
1827 			if (err)
1828 				goto errout_hw;
1829 			in_ht = true;
1830 		}
1831 
1832 		refcount_inc(&fnew->refcnt);
1833 		rhashtable_remove_fast(&fold->mask->ht,
1834 				       &fold->ht_node,
1835 				       fold->mask->filter_ht_params);
1836 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1837 		list_replace_rcu(&fold->list, &fnew->list);
1838 		fold->deleted = true;
1839 
1840 		spin_unlock(&tp->lock);
1841 
1842 		fl_mask_put(head, fold->mask);
1843 		if (!tc_skip_hw(fold->flags))
1844 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1845 		tcf_unbind_filter(tp, &fold->res);
1846 		/* Caller holds reference to fold, so refcnt is always > 0
1847 		 * after this.
1848 		 */
1849 		refcount_dec(&fold->refcnt);
1850 		__fl_put(fold);
1851 	} else {
1852 		if (handle) {
1853 			/* user specifies a handle and it doesn't exist */
1854 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1855 					    handle, GFP_ATOMIC);
1856 
1857 			/* Filter with specified handle was concurrently
1858 			 * inserted after initial check in cls_api. This is not
1859 			 * necessarily an error if NLM_F_EXCL is not set in
1860 			 * message flags. Returning EAGAIN will cause cls_api to
1861 			 * try to update concurrently inserted rule.
1862 			 */
1863 			if (err == -ENOSPC)
1864 				err = -EAGAIN;
1865 		} else {
1866 			handle = 1;
1867 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1868 					    INT_MAX, GFP_ATOMIC);
1869 		}
1870 		if (err)
1871 			goto errout_hw;
1872 
1873 		refcount_inc(&fnew->refcnt);
1874 		fnew->handle = handle;
1875 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1876 		spin_unlock(&tp->lock);
1877 	}
1878 
1879 	*arg = fnew;
1880 
1881 	kfree(tb);
1882 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1883 	return 0;
1884 
1885 errout_ht:
1886 	spin_lock(&tp->lock);
1887 errout_hw:
1888 	fnew->deleted = true;
1889 	spin_unlock(&tp->lock);
1890 	if (!tc_skip_hw(fnew->flags))
1891 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1892 	if (in_ht)
1893 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1894 				       fnew->mask->filter_ht_params);
1895 errout_mask:
1896 	fl_mask_put(head, fnew->mask);
1897 errout:
1898 	__fl_put(fnew);
1899 errout_tb:
1900 	kfree(tb);
1901 errout_mask_alloc:
1902 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1903 errout_fold:
1904 	if (fold)
1905 		__fl_put(fold);
1906 	return err;
1907 }
1908 
1909 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1910 		     bool rtnl_held, struct netlink_ext_ack *extack)
1911 {
1912 	struct cls_fl_head *head = fl_head_dereference(tp);
1913 	struct cls_fl_filter *f = arg;
1914 	bool last_on_mask;
1915 	int err = 0;
1916 
1917 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1918 	*last = list_empty(&head->masks);
1919 	__fl_put(f);
1920 
1921 	return err;
1922 }
1923 
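/* ->walk() callback: iterate the handle IDR, taking a temporary reference
 * on each filter so that entries concurrently being deleted are skipped.
 */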
1924 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1925 		    bool rtnl_held)
1926 {
1927 	struct cls_fl_head *head = fl_head_dereference(tp);
1928 	unsigned long id = arg->cookie, tmp;
1929 	struct cls_fl_filter *f;
1930 
1931 	arg->count = arg->skip;
1932 
1933 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1934 		/* don't return filters that are being deleted */
1935 		if (!refcount_inc_not_zero(&f->refcnt))
1936 			continue;
1937 		if (arg->fn(tp, f, arg) < 0) {
1938 			__fl_put(f);
1939 			arg->stop = 1;
1940 			break;
1941 		}
1942 		__fl_put(f);
1943 		arg->count++;
1944 	}
1945 	arg->cookie = id;
1946 }
1947 
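/* Return the next offloaded filter after @f (or the first one when @f is
 * NULL), holding a reference on it. Filters already marked deleted are
 * skipped when rules are being (re-)added to hardware.
 */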
1948 static struct cls_fl_filter *
1949 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1950 {
1951 	struct cls_fl_head *head = fl_head_dereference(tp);
1952 
1953 	spin_lock(&tp->lock);
1954 	if (list_empty(&head->hw_filters)) {
1955 		spin_unlock(&tp->lock);
1956 		return NULL;
1957 	}
1958 
1959 	if (!f)
1960 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1961 			       hw_list);
1962 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1963 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1964 			spin_unlock(&tp->lock);
1965 			return f;
1966 		}
1967 	}
1968 
1969 	spin_unlock(&tp->lock);
1970 	return NULL;
1971 }
1972 
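/* Replay all offloaded filters to a newly attached (add) or detached (!add)
 * offload callback. A failure to translate the actions is ignored unless
 * the filter is skip_sw; errors from the callback itself always abort.
 */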
1973 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1974 			void *cb_priv, struct netlink_ext_ack *extack)
1975 {
1976 	struct tcf_block *block = tp->chain->block;
1977 	struct flow_cls_offload cls_flower = {};
1978 	struct cls_fl_filter *f = NULL;
1979 	int err;
1980 
1981 	/* hw_filters list can only be changed by hw offload functions after
1982 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1983 	 * iterating it.
1984 	 */
1985 	ASSERT_RTNL();
1986 
1987 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1988 		cls_flower.rule =
1989 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1990 		if (!cls_flower.rule) {
1991 			__fl_put(f);
1992 			return -ENOMEM;
1993 		}
1994 
1995 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1996 					   extack);
1997 		cls_flower.command = add ?
1998 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1999 		cls_flower.cookie = (unsigned long)f;
2000 		cls_flower.rule->match.dissector = &f->mask->dissector;
2001 		cls_flower.rule->match.mask = &f->mask->key;
2002 		cls_flower.rule->match.key = &f->mkey;
2003 
2004 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
2005 					   true);
2006 		if (err) {
2007 			kfree(cls_flower.rule);
2008 			if (tc_skip_sw(f->flags)) {
2009 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2010 				__fl_put(f);
2011 				return err;
2012 			}
2013 			goto next_flow;
2014 		}
2015 
2016 		cls_flower.classid = f->res.classid;
2017 
2018 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2019 					    TC_SETUP_CLSFLOWER, &cls_flower,
2020 					    cb_priv, &f->flags,
2021 					    &f->in_hw_count);
2022 		tc_cleanup_flow_action(&cls_flower.rule->action);
2023 		kfree(cls_flower.rule);
2024 
2025 		if (err) {
2026 			__fl_put(f);
2027 			return err;
2028 		}
2029 next_flow:
2030 		__fl_put(f);
2031 	}
2032 
2033 	return 0;
2034 }
2035 
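/* ->hw_add()/->hw_del() are invoked by the cls_api offload helpers when a
 * filter gains or loses hardware presence; they only maintain the
 * hw_filters list that fl_reoffload() walks, under tp->lock.
 */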
2036 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2037 {
2038 	struct flow_cls_offload *cls_flower = type_data;
2039 	struct cls_fl_filter *f =
2040 		(struct cls_fl_filter *) cls_flower->cookie;
2041 	struct cls_fl_head *head = fl_head_dereference(tp);
2042 
2043 	spin_lock(&tp->lock);
2044 	list_add(&f->hw_list, &head->hw_filters);
2045 	spin_unlock(&tp->lock);
2046 }
2047 
2048 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2049 {
2050 	struct flow_cls_offload *cls_flower = type_data;
2051 	struct cls_fl_filter *f =
2052 		(struct cls_fl_filter *) cls_flower->cookie;
2053 
2054 	spin_lock(&tp->lock);
2055 	if (!list_empty(&f->hw_list))
2056 		list_del_init(&f->hw_list);
2057 	spin_unlock(&tp->lock);
2058 }
2059 
2060 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2061 			      struct fl_flow_tmplt *tmplt)
2062 {
2063 	struct flow_cls_offload cls_flower = {};
2064 	struct tcf_block *block = chain->block;
2065 
2066 	cls_flower.rule = flow_rule_alloc(0);
2067 	if (!cls_flower.rule)
2068 		return -ENOMEM;
2069 
2070 	cls_flower.common.chain_index = chain->index;
2071 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2072 	cls_flower.cookie = (unsigned long) tmplt;
2073 	cls_flower.rule->match.dissector = &tmplt->dissector;
2074 	cls_flower.rule->match.mask = &tmplt->mask;
2075 	cls_flower.rule->match.key = &tmplt->dummy_key;
2076 
2077 	/* We don't care if any of the drivers fails to handle this call;
2078 	 * it serves only as a hint to them.
2079 	 */
2080 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2081 	kfree(cls_flower.rule);
2082 
2083 	return 0;
2084 }
2085 
2086 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2087 				struct fl_flow_tmplt *tmplt)
2088 {
2089 	struct flow_cls_offload cls_flower = {};
2090 	struct tcf_block *block = chain->block;
2091 
2092 	cls_flower.common.chain_index = chain->index;
2093 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2094 	cls_flower.cookie = (unsigned long) tmplt;
2095 
2096 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2097 }
2098 
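/* Parse a chain template: the key/mask attributes are parsed like those of
 * a regular filter, but only the mask and a dummy key are kept and offered
 * to the drivers as a hint.
 */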
2099 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2100 			     struct nlattr **tca,
2101 			     struct netlink_ext_ack *extack)
2102 {
2103 	struct fl_flow_tmplt *tmplt;
2104 	struct nlattr **tb;
2105 	int err;
2106 
2107 	if (!tca[TCA_OPTIONS])
2108 		return ERR_PTR(-EINVAL);
2109 
2110 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2111 	if (!tb)
2112 		return ERR_PTR(-ENOBUFS);
2113 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2114 					  tca[TCA_OPTIONS], fl_policy, NULL);
2115 	if (err)
2116 		goto errout_tb;
2117 
2118 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2119 	if (!tmplt) {
2120 		err = -ENOMEM;
2121 		goto errout_tb;
2122 	}
2123 	tmplt->chain = chain;
2124 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2125 	if (err)
2126 		goto errout_tmplt;
2127 
2128 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2129 
2130 	err = fl_hw_create_tmplt(chain, tmplt);
2131 	if (err)
2132 		goto errout_tmplt;
2133 
2134 	kfree(tb);
2135 	return tmplt;
2136 
2137 errout_tmplt:
2138 	kfree(tmplt);
2139 errout_tb:
2140 	kfree(tb);
2141 	return ERR_PTR(err);
2142 }
2143 
2144 static void fl_tmplt_destroy(void *tmplt_priv)
2145 {
2146 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2147 
2148 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2149 	kfree(tmplt);
2150 }
2151 
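/* Dump helper: emit the value attribute (and the mask attribute, unless
 * mask_type is TCA_FLOWER_UNSPEC) only if at least one mask bit is set, so
 * unused fields never show up in dumps.
 */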
2152 static int fl_dump_key_val(struct sk_buff *skb,
2153 			   void *val, int val_type,
2154 			   void *mask, int mask_type, int len)
2155 {
2156 	int err;
2157 
2158 	if (!memchr_inv(mask, 0, len))
2159 		return 0;
2160 	err = nla_put(skb, val_type, len, val);
2161 	if (err)
2162 		return err;
2163 	if (mask_type != TCA_FLOWER_UNSPEC) {
2164 		err = nla_put(skb, mask_type, len, mask);
2165 		if (err)
2166 			return err;
2167 	}
2168 	return 0;
2169 }
2170 
2171 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2172 				  struct fl_flow_key *mask)
2173 {
2174 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2175 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2176 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2177 			    sizeof(key->tp_range.tp_min.dst)) ||
2178 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2179 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2180 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2181 			    sizeof(key->tp_range.tp_max.dst)) ||
2182 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2183 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2184 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2185 			    sizeof(key->tp_range.tp_min.src)) ||
2186 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2187 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2188 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2189 			    sizeof(key->tp_range.tp_max.src)))
2190 		return -1;
2191 
2192 	return 0;
2193 }
2194 
2195 static int fl_dump_key_mpls(struct sk_buff *skb,
2196 			    struct flow_dissector_key_mpls *mpls_key,
2197 			    struct flow_dissector_key_mpls *mpls_mask)
2198 {
2199 	int err;
2200 
2201 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2202 		return 0;
2203 	if (mpls_mask->mpls_ttl) {
2204 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2205 				 mpls_key->mpls_ttl);
2206 		if (err)
2207 			return err;
2208 	}
2209 	if (mpls_mask->mpls_tc) {
2210 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2211 				 mpls_key->mpls_tc);
2212 		if (err)
2213 			return err;
2214 	}
2215 	if (mpls_mask->mpls_label) {
2216 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2217 				  mpls_key->mpls_label);
2218 		if (err)
2219 			return err;
2220 	}
2221 	if (mpls_mask->mpls_bos) {
2222 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2223 				 mpls_key->mpls_bos);
2224 		if (err)
2225 			return err;
2226 	}
2227 	return 0;
2228 }
2229 
2230 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2231 			  struct flow_dissector_key_ip *key,
2232 			  struct flow_dissector_key_ip *mask)
2233 {
2234 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2235 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2236 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2237 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2238 
2239 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2240 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2241 		return -1;
2242 
2243 	return 0;
2244 }
2245 
2246 static int fl_dump_key_vlan(struct sk_buff *skb,
2247 			    int vlan_id_key, int vlan_prio_key,
2248 			    struct flow_dissector_key_vlan *vlan_key,
2249 			    struct flow_dissector_key_vlan *vlan_mask)
2250 {
2251 	int err;
2252 
2253 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2254 		return 0;
2255 	if (vlan_mask->vlan_id) {
2256 		err = nla_put_u16(skb, vlan_id_key,
2257 				  vlan_key->vlan_id);
2258 		if (err)
2259 			return err;
2260 	}
2261 	if (vlan_mask->vlan_priority) {
2262 		err = nla_put_u8(skb, vlan_prio_key,
2263 				 vlan_key->vlan_priority);
2264 		if (err)
2265 			return err;
2266 	}
2267 	return 0;
2268 }
2269 
2270 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2271 			    u32 *flower_key, u32 *flower_mask,
2272 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2273 {
2274 	if (dissector_mask & dissector_flag_bit) {
2275 		*flower_mask |= flower_flag_bit;
2276 		if (dissector_key & dissector_flag_bit)
2277 			*flower_key |= flower_flag_bit;
2278 	}
2279 }
2280 
2281 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2282 {
2283 	u32 key, mask;
2284 	__be32 _key, _mask;
2285 	int err;
2286 
2287 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2288 		return 0;
2289 
2290 	key = 0;
2291 	mask = 0;
2292 
2293 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2294 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2295 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2296 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2297 			FLOW_DIS_FIRST_FRAG);
2298 
2299 	_key = cpu_to_be32(key);
2300 	_mask = cpu_to_be32(mask);
2301 
2302 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2303 	if (err)
2304 		return err;
2305 
2306 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2307 }
2308 
2309 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2310 				  struct flow_dissector_key_enc_opts *enc_opts)
2311 {
2312 	struct geneve_opt *opt;
2313 	struct nlattr *nest;
2314 	int opt_off = 0;
2315 
2316 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2317 	if (!nest)
2318 		goto nla_put_failure;
2319 
2320 	while (enc_opts->len > opt_off) {
2321 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2322 
2323 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2324 				 opt->opt_class))
2325 			goto nla_put_failure;
2326 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2327 			       opt->type))
2328 			goto nla_put_failure;
2329 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2330 			    opt->length * 4, opt->opt_data))
2331 			goto nla_put_failure;
2332 
2333 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2334 	}
2335 	nla_nest_end(skb, nest);
2336 	return 0;
2337 
2338 nla_put_failure:
2339 	nla_nest_cancel(skb, nest);
2340 	return -EMSGSIZE;
2341 }
2342 
2343 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2344 				 struct flow_dissector_key_enc_opts *enc_opts)
2345 {
2346 	struct vxlan_metadata *md;
2347 	struct nlattr *nest;
2348 
2349 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2350 	if (!nest)
2351 		goto nla_put_failure;
2352 
2353 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2354 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2355 		goto nla_put_failure;
2356 
2357 	nla_nest_end(skb, nest);
2358 	return 0;
2359 
2360 nla_put_failure:
2361 	nla_nest_cancel(skb, nest);
2362 	return -EMSGSIZE;
2363 }
2364 
2365 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2366 				  struct flow_dissector_key_enc_opts *enc_opts)
2367 {
2368 	struct erspan_metadata *md;
2369 	struct nlattr *nest;
2370 
2371 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2372 	if (!nest)
2373 		goto nla_put_failure;
2374 
2375 	md = (struct erspan_metadata *)&enc_opts->data[0];
2376 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2377 		goto nla_put_failure;
2378 
2379 	if (md->version == 1 &&
2380 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2381 		goto nla_put_failure;
2382 
2383 	if (md->version == 2 &&
2384 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2385 			md->u.md2.dir) ||
2386 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2387 			get_hwid(&md->u.md2))))
2388 		goto nla_put_failure;
2389 
2390 	nla_nest_end(skb, nest);
2391 	return 0;
2392 
2393 nla_put_failure:
2394 	nla_nest_cancel(skb, nest);
2395 	return -EMSGSIZE;
2396 }
2397 
2398 static int fl_dump_key_ct(struct sk_buff *skb,
2399 			  struct flow_dissector_key_ct *key,
2400 			  struct flow_dissector_key_ct *mask)
2401 {
2402 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2403 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2404 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2405 			    sizeof(key->ct_state)))
2406 		goto nla_put_failure;
2407 
2408 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2409 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2410 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2411 			    sizeof(key->ct_zone)))
2412 		goto nla_put_failure;
2413 
2414 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2415 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2416 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2417 			    sizeof(key->ct_mark)))
2418 		goto nla_put_failure;
2419 
2420 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2421 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2422 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2423 			    sizeof(key->ct_labels)))
2424 		goto nla_put_failure;
2425 
2426 	return 0;
2427 
2428 nla_put_failure:
2429 	return -EMSGSIZE;
2430 }
2431 
2432 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2433 			       struct flow_dissector_key_enc_opts *enc_opts)
2434 {
2435 	struct nlattr *nest;
2436 	int err;
2437 
2438 	if (!enc_opts->len)
2439 		return 0;
2440 
2441 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2442 	if (!nest)
2443 		goto nla_put_failure;
2444 
2445 	switch (enc_opts->dst_opt_type) {
2446 	case TUNNEL_GENEVE_OPT:
2447 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2448 		if (err)
2449 			goto nla_put_failure;
2450 		break;
2451 	case TUNNEL_VXLAN_OPT:
2452 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2453 		if (err)
2454 			goto nla_put_failure;
2455 		break;
2456 	case TUNNEL_ERSPAN_OPT:
2457 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2458 		if (err)
2459 			goto nla_put_failure;
2460 		break;
2461 	default:
2462 		goto nla_put_failure;
2463 	}
2464 	nla_nest_end(skb, nest);
2465 	return 0;
2466 
2467 nla_put_failure:
2468 	nla_nest_cancel(skb, nest);
2469 	return -EMSGSIZE;
2470 }
2471 
2472 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2473 			       struct flow_dissector_key_enc_opts *key_opts,
2474 			       struct flow_dissector_key_enc_opts *msk_opts)
2475 {
2476 	int err;
2477 
2478 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2479 	if (err)
2480 		return err;
2481 
2482 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2483 }
2484 
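/* Translate a key/mask pair back into TCA_FLOWER_* attributes, mirroring
 * fl_set_key(): each group of fields is dumped only when the protocol
 * context (n_proto, ip_proto, addr_type) makes it meaningful.
 */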
2485 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2486 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2487 {
2488 	if (mask->meta.ingress_ifindex) {
2489 		struct net_device *dev;
2490 
2491 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2492 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2493 			goto nla_put_failure;
2494 	}
2495 
2496 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2497 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2498 			    sizeof(key->eth.dst)) ||
2499 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2500 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2501 			    sizeof(key->eth.src)) ||
2502 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2503 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2504 			    sizeof(key->basic.n_proto)))
2505 		goto nla_put_failure;
2506 
2507 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2508 		goto nla_put_failure;
2509 
2510 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2511 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2512 		goto nla_put_failure;
2513 
2514 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2515 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2516 			     &key->cvlan, &mask->cvlan) ||
2517 	    (mask->cvlan.vlan_tpid &&
2518 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2519 			  key->cvlan.vlan_tpid)))
2520 		goto nla_put_failure;
2521 
2522 	if (mask->basic.n_proto) {
2523 		if (mask->cvlan.vlan_tpid) {
2524 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2525 					 key->basic.n_proto))
2526 				goto nla_put_failure;
2527 		} else if (mask->vlan.vlan_tpid) {
2528 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2529 					 key->basic.n_proto))
2530 				goto nla_put_failure;
2531 		}
2532 	}
2533 
2534 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2535 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2536 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2537 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2538 			    sizeof(key->basic.ip_proto)) ||
2539 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2540 		goto nla_put_failure;
2541 
2542 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2543 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2544 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2545 			     sizeof(key->ipv4.src)) ||
2546 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2547 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2548 			     sizeof(key->ipv4.dst))))
2549 		goto nla_put_failure;
2550 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2551 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2552 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2553 				  sizeof(key->ipv6.src)) ||
2554 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2555 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2556 				  sizeof(key->ipv6.dst))))
2557 		goto nla_put_failure;
2558 
2559 	if (key->basic.ip_proto == IPPROTO_TCP &&
2560 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2561 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2562 			     sizeof(key->tp.src)) ||
2563 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2564 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2565 			     sizeof(key->tp.dst)) ||
2566 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2567 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2568 			     sizeof(key->tcp.flags))))
2569 		goto nla_put_failure;
2570 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2571 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2572 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2573 				  sizeof(key->tp.src)) ||
2574 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2575 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2576 				  sizeof(key->tp.dst))))
2577 		goto nla_put_failure;
2578 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2579 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2580 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2581 				  sizeof(key->tp.src)) ||
2582 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2583 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2584 				  sizeof(key->tp.dst))))
2585 		goto nla_put_failure;
2586 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2587 		 key->basic.ip_proto == IPPROTO_ICMP &&
2588 		 (fl_dump_key_val(skb, &key->icmp.type,
2589 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2590 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2591 				  sizeof(key->icmp.type)) ||
2592 		  fl_dump_key_val(skb, &key->icmp.code,
2593 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2594 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2595 				  sizeof(key->icmp.code))))
2596 		goto nla_put_failure;
2597 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2598 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2599 		 (fl_dump_key_val(skb, &key->icmp.type,
2600 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2601 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2602 				  sizeof(key->icmp.type)) ||
2603 		  fl_dump_key_val(skb, &key->icmp.code,
2604 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2605 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2606 				  sizeof(key->icmp.code))))
2607 		goto nla_put_failure;
2608 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2609 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2610 		 (fl_dump_key_val(skb, &key->arp.sip,
2611 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2612 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2613 				  sizeof(key->arp.sip)) ||
2614 		  fl_dump_key_val(skb, &key->arp.tip,
2615 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2616 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2617 				  sizeof(key->arp.tip)) ||
2618 		  fl_dump_key_val(skb, &key->arp.op,
2619 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2620 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2621 				  sizeof(key->arp.op)) ||
2622 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2623 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2624 				  sizeof(key->arp.sha)) ||
2625 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2626 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2627 				  sizeof(key->arp.tha))))
2628 		goto nla_put_failure;
2629 
2630 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2631 	     key->basic.ip_proto == IPPROTO_UDP ||
2632 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2633 	     fl_dump_key_port_range(skb, key, mask))
2634 		goto nla_put_failure;
2635 
2636 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2637 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2638 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2639 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2640 			    sizeof(key->enc_ipv4.src)) ||
2641 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2642 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2643 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2644 			     sizeof(key->enc_ipv4.dst))))
2645 		goto nla_put_failure;
2646 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2647 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2648 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2649 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2650 			    sizeof(key->enc_ipv6.src)) ||
2651 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2652 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2653 				 &mask->enc_ipv6.dst,
2654 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2655 			    sizeof(key->enc_ipv6.dst))))
2656 		goto nla_put_failure;
2657 
2658 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2659 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2660 			    sizeof(key->enc_key_id)) ||
2661 	    fl_dump_key_val(skb, &key->enc_tp.src,
2662 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2663 			    &mask->enc_tp.src,
2664 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2665 			    sizeof(key->enc_tp.src)) ||
2666 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2667 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2668 			    &mask->enc_tp.dst,
2669 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2670 			    sizeof(key->enc_tp.dst)) ||
2671 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2672 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2673 		goto nla_put_failure;
2674 
2675 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2676 		goto nla_put_failure;
2677 
2678 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2679 		goto nla_put_failure;
2680 
2681 	return 0;
2682 
2683 nla_put_failure:
2684 	return -EMSGSIZE;
2685 }
2686 
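/* ->dump() callback: snapshot classid, key/mask and flags under tp->lock,
 * then fetch hardware stats and dump the actions outside of it.
 */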
2687 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2688 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2689 {
2690 	struct cls_fl_filter *f = fh;
2691 	struct nlattr *nest;
2692 	struct fl_flow_key *key, *mask;
2693 	bool skip_hw;
2694 
2695 	if (!f)
2696 		return skb->len;
2697 
2698 	t->tcm_handle = f->handle;
2699 
2700 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2701 	if (!nest)
2702 		goto nla_put_failure;
2703 
2704 	spin_lock(&tp->lock);
2705 
2706 	if (f->res.classid &&
2707 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2708 		goto nla_put_failure_locked;
2709 
2710 	key = &f->key;
2711 	mask = &f->mask->key;
2712 	skip_hw = tc_skip_hw(f->flags);
2713 
2714 	if (fl_dump_key(skb, net, key, mask))
2715 		goto nla_put_failure_locked;
2716 
2717 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2718 		goto nla_put_failure_locked;
2719 
2720 	spin_unlock(&tp->lock);
2721 
2722 	if (!skip_hw)
2723 		fl_hw_update_stats(tp, f, rtnl_held);
2724 
2725 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2726 		goto nla_put_failure;
2727 
2728 	if (tcf_exts_dump(skb, &f->exts))
2729 		goto nla_put_failure;
2730 
2731 	nla_nest_end(skb, nest);
2732 
2733 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2734 		goto nla_put_failure;
2735 
2736 	return skb->len;
2737 
2738 nla_put_failure_locked:
2739 	spin_unlock(&tp->lock);
2740 nla_put_failure:
2741 	nla_nest_cancel(skb, nest);
2742 	return -1;
2743 }
2744 
2745 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2746 {
2747 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2748 	struct fl_flow_key *key, *mask;
2749 	struct nlattr *nest;
2750 
2751 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2752 	if (!nest)
2753 		goto nla_put_failure;
2754 
2755 	key = &tmplt->dummy_key;
2756 	mask = &tmplt->mask;
2757 
2758 	if (fl_dump_key(skb, net, key, mask))
2759 		goto nla_put_failure;
2760 
2761 	nla_nest_end(skb, nest);
2762 
2763 	return skb->len;
2764 
2765 nla_put_failure:
2766 	nla_nest_cancel(skb, nest);
2767 	return -EMSGSIZE;
2768 }
2769 
2770 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
2771 			  unsigned long base)
2772 {
2773 	struct cls_fl_filter *f = fh;
2774 
2775 	if (f && f->res.classid == classid) {
2776 		if (cl)
2777 			__tcf_bind_filter(q, &f->res, base);
2778 		else
2779 			__tcf_unbind_filter(q, &f->res);
2780 	}
2781 }
2782 
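/* ->delete_empty() callback: mark the proto instance as deleting once its
 * handle IDR is empty, so concurrent fl_change() calls back off with
 * -EAGAIN and cls_api can safely remove the instance.
 */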
2783 static bool fl_delete_empty(struct tcf_proto *tp)
2784 {
2785 	struct cls_fl_head *head = fl_head_dereference(tp);
2786 
2787 	spin_lock(&tp->lock);
2788 	tp->deleting = idr_is_empty(&head->handle_idr);
2789 	spin_unlock(&tp->lock);
2790 
2791 	return tp->deleting;
2792 }
2793 
2794 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2795 	.kind		= "flower",
2796 	.classify	= fl_classify,
2797 	.init		= fl_init,
2798 	.destroy	= fl_destroy,
2799 	.get		= fl_get,
2800 	.put		= fl_put,
2801 	.change		= fl_change,
2802 	.delete		= fl_delete,
2803 	.delete_empty	= fl_delete_empty,
2804 	.walk		= fl_walk,
2805 	.reoffload	= fl_reoffload,
2806 	.hw_add		= fl_hw_add,
2807 	.hw_del		= fl_hw_del,
2808 	.dump		= fl_dump,
2809 	.bind_class	= fl_bind_class,
2810 	.tmplt_create	= fl_tmplt_create,
2811 	.tmplt_destroy	= fl_tmplt_destroy,
2812 	.tmplt_dump	= fl_tmplt_dump,
2813 	.owner		= THIS_MODULE,
2814 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2815 };
2816 
2817 static int __init cls_fl_init(void)
2818 {
2819 	return register_tcf_proto_ops(&cls_fl_ops);
2820 }
2821 
2822 static void __exit cls_fl_exit(void)
2823 {
2824 	unregister_tcf_proto_ops(&cls_fl_ops);
2825 }
2826 
2827 module_init(cls_fl_init);
2828 module_exit(cls_fl_exit);
2829 
2830 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2831 MODULE_DESCRIPTION("Flower classifier");
2832 MODULE_LICENSE("GPL v2");
2833