// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	union {
		struct flow_dissector_key_ports tp;
		struct {
			struct flow_dissector_key_ports tp_min;
			struct flow_dissector_key_ports tp_max;
		};
	} tp_range;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
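
/* Illustrative example: if the only non-zero bytes of mask->key sit in
 * the eth member, the scans above find the first and last such bytes
 * and the range is narrowed to the long-aligned window around them, so
 * every masked-key operation (hash, compare, copy) touches only that
 * window instead of the whole struct fl_flow_key.
 */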

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
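
/* A minimal sketch of the AND loop above for a single long on a 64-bit
 * build (values are hypothetical):
 *
 *	key:  0x0a0b0c0d11223344
 *	mask: 0xffffffff00000000
 *	mkey: 0x0a0b0c0d00000000
 *
 * Lookups hash and compare mkey, so bytes the mask clears can never
 * cause a mismatch.
 */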

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_range.tp_max.dst);
	min_val = htons(filter->key.tp_range.tp_min.dst);
	max_val = htons(filter->key.tp_range.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.dst) < min_val ||
		    htons(key->tp_range.tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_range.tp_min.src);
	max_mask = htons(filter->mask->key.tp_range.tp_max.src);
	min_val = htons(filter->key.tp_range.tp_min.src);
	max_val = htons(filter->key.tp_range.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp_range.tp.src) < min_val ||
		    htons(key->tp_range.tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
	}
	return true;
}
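
/* Byte-order note for the two range helpers above: the tp_* fields are
 * __be16, and htons() is applied to bring them into host order for the
 * numeric min/max comparison; htons() and ntohs() perform the identical
 * byte swap, so the result is equivalent to ntohs().
 */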

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}
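
/* Lookup strategy: an ordinary mask is resolved with one hash lookup in
 * __fl_lookup(). A mask flagged TCA_FLOWER_MASK_FLAGS_RANGE cannot be,
 * because a port range is not expressible as a bitmask; its filters are
 * walked, the packet's port is checked against min/max, the filter's
 * own min/max values are patched into mkey and only then is the hash
 * table consulted.
 */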

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
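
/* Userspace view (illustrative tc(8) invocation; the device name is an
 * example):
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * installs one filter whose mask covers ip_proto and the destination
 * port. fl_classify() above dissects the skb once per distinct mask in
 * use, then does a single hash lookup on the masked key.
 */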

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
				   rtnl_held);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
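
/* Offload policy visible above: a flow action that cannot be translated
 * is fatal only when skip_sw is set, since a software fallback is then
 * forbidden; otherwise the filter simply stays software-only. The final
 * check likewise rejects a skip_sw filter that no driver accepted
 * (TCA_CLS_FLAGS_IN_HW never got set).
 */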

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
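
/* Attribute convention used by all callers of fl_set_key_val(): an
 * absent value attribute leaves the field zeroed (wildcard); a value
 * without a mask attribute (or with mask_type == TCA_FLOWER_UNSPEC)
 * gets an all-ones mask, i.e. an exact match on the supplied value.
 */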

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
	fl_set_key_val(tb, &key->tp_range.tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
	fl_set_key_val(tb, &key->tp_range.tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

	if ((mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
	     htons(key->tp_range.tp_max.dst) <=
		 htons(key->tp_range.tp_min.dst)) ||
	    (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
	     htons(key->tp_range.tp_max.src) <=
		 htons(key->tp_range.tp_min.src)))
		return -EINVAL;

	return 0;
}
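
/* The check above rejects empty and inverted ranges: when both bounds
 * of a direction are masked, max must be strictly greater than min
 * (compared in host byte order), so e.g. a destination range of 80-80
 * or 100-80 fails with -EINVAL.
 */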

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}
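
/* Convention shared by the fl_set_*_opt() helpers: each one runs twice,
 * first on the key and then on the mask. option_len is 0 on the key
 * pass (so mandatory fields are enforced) and carries the key's parsed
 * length on the mask pass (where fields may be omitted). A depth of 0
 * means no mask attribute was supplied at all; the 0xff fill then
 * stands and the option is treated as an exact match.
 */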

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}
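
/* Each mask owns a filter hash table keyed on just the masked window:
 * key_offset is advanced by range.start and key_len shrunk to
 * fl_mask_range(), so hashing and comparison never read the bytes
 * outside [range.start, range.end) that the mask zeroes out anyway.
 */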

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CT, ct);

	skb_flow_dissector_init(dissector, keys, cnt);
}
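
/* Only CONTROL and BASIC are registered unconditionally above; every
 * other dissector key is enabled only when the corresponding member of
 * the mask is non-zero (and ENC_CONTROL only when a tunnel address is
 * masked), so the fast path never dissects headers that no installed
 * filter can match on.
 */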
1559 
1560 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1561 					       struct fl_flow_mask *mask)
1562 {
1563 	struct fl_flow_mask *newmask;
1564 	int err;
1565 
1566 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1567 	if (!newmask)
1568 		return ERR_PTR(-ENOMEM);
1569 
1570 	fl_mask_copy(newmask, mask);
1571 
1572 	if ((newmask->key.tp_range.tp_min.dst &&
1573 	     newmask->key.tp_range.tp_max.dst) ||
1574 	    (newmask->key.tp_range.tp_min.src &&
1575 	     newmask->key.tp_range.tp_max.src))
1576 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1577 
1578 	err = fl_init_mask_hashtable(newmask);
1579 	if (err)
1580 		goto errout_free;
1581 
1582 	fl_init_dissector(&newmask->dissector, &newmask->key);
1583 
1584 	INIT_LIST_HEAD_RCU(&newmask->filters);
1585 
1586 	refcount_set(&newmask->refcnt, 1);
1587 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1588 				      &newmask->ht_node, mask_ht_params);
1589 	if (err)
1590 		goto errout_destroy;
1591 
1592 	spin_lock(&head->masks_lock);
1593 	list_add_tail_rcu(&newmask->list, &head->masks);
1594 	spin_unlock(&head->masks_lock);
1595 
1596 	return newmask;
1597 
1598 errout_destroy:
1599 	rhashtable_destroy(&newmask->ht);
1600 errout_free:
1601 	kfree(newmask);
1602 
1603 	return ERR_PTR(err);
1604 }
1605 
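/* Find or create the shared mask for @fnew. On success, fnew->mask holds
 * a reference to a mask in head->ht: either a freshly created one (when
 * no mask with the same key exists) or an existing one with its refcnt
 * bumped. An update (@fold != NULL) must reuse fold's existing mask, so
 * any mismatch is rejected with -EINVAL.
 */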
1606 static int fl_check_assign_mask(struct cls_fl_head *head,
1607 				struct cls_fl_filter *fnew,
1608 				struct cls_fl_filter *fold,
1609 				struct fl_flow_mask *mask)
1610 {
1611 	struct fl_flow_mask *newmask;
1612 	int ret = 0;
1613 
1614 	rcu_read_lock();
1615 
1616 	/* Insert the mask as a temporary node to prevent concurrent creation of
1617 	 * a mask with the same key. Any concurrent lookup with the same key will
1618 	 * return -EAGAIN because the mask's refcnt is zero.
1619 	 */
1620 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1621 						       &mask->ht_node,
1622 						       mask_ht_params);
1623 	if (!fnew->mask) {
1624 		rcu_read_unlock();
1625 
1626 		if (fold) {
1627 			ret = -EINVAL;
1628 			goto errout_cleanup;
1629 		}
1630 
1631 		newmask = fl_create_new_mask(head, mask);
1632 		if (IS_ERR(newmask)) {
1633 			ret = PTR_ERR(newmask);
1634 			goto errout_cleanup;
1635 		}
1636 
1637 		fnew->mask = newmask;
1638 		return 0;
1639 	} else if (IS_ERR(fnew->mask)) {
1640 		ret = PTR_ERR(fnew->mask);
1641 	} else if (fold && fold->mask != fnew->mask) {
1642 		ret = -EINVAL;
1643 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1644 		/* Mask was deleted concurrently, try again */
1645 		ret = -EAGAIN;
1646 	}
1647 	rcu_read_unlock();
1648 	return ret;
1649 
1650 errout_cleanup:
1651 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1652 			       mask_ht_params);
1653 	return ret;
1654 }
1655 
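/* Parse and validate the netlink attributes for a new or updated filter:
 * validate the actions, bind an optional classid (taking rtnl if the
 * caller does not already hold it), fill in f->key and mask->key,
 * precompute the masked key used for hashing, and verify that the mask
 * fits the chain template.
 */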
1656 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1657 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1658 			unsigned long base, struct nlattr **tb,
1659 			struct nlattr *est, bool ovr,
1660 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1661 			struct netlink_ext_ack *extack)
1662 {
1663 	int err;
1664 
1665 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1666 				extack);
1667 	if (err < 0)
1668 		return err;
1669 
1670 	if (tb[TCA_FLOWER_CLASSID]) {
1671 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1672 		if (!rtnl_held)
1673 			rtnl_lock();
1674 		tcf_bind_filter(tp, &f->res, base);
1675 		if (!rtnl_held)
1676 			rtnl_unlock();
1677 	}
1678 
1679 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1680 	if (err)
1681 		return err;
1682 
1683 	fl_mask_update_range(mask);
1684 	fl_set_masked_key(&f->mkey, &f->key, mask);
1685 
1686 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1687 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1688 		return -EINVAL;
1689 	}
1690 
1691 	return 0;
1692 }
1693 
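/* Insert @fnew into its mask's filter hashtable. *in_ht reports whether
 * the node actually went in: when overwriting an existing filter
 * (@fold), -EEXIST is tolerated here and the insertion is redone later
 * in fl_change() under tp->lock, once fold's removal is imminent.
 */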
1694 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1695 			       struct cls_fl_filter *fold,
1696 			       bool *in_ht)
1697 {
1698 	struct fl_flow_mask *mask = fnew->mask;
1699 	int err;
1700 
1701 	err = rhashtable_lookup_insert_fast(&mask->ht,
1702 					    &fnew->ht_node,
1703 					    mask->filter_ht_params);
1704 	if (err) {
1705 		*in_ht = false;
1706 		/* It is okay if a filter with the same key already
1707 		 * exists when overwriting.
1708 		 */
1709 		return fold && err == -EEXIST ? 0 : err;
1710 	}
1711 
1712 	*in_ht = true;
1713 	return 0;
1714 }
1715 
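/* Create a new filter or replace an existing one (@fold). This runs
 * without rtnl when @rtnl_held is false (the classifier advertises
 * TCF_PROTO_OPS_DOIT_UNLOCKED), so races with concurrent inserts,
 * deletes and proto destruction are resolved via tp->lock, reference
 * counts and -EAGAIN retries in cls_api.
 *
 * Illustrative userspace trigger (assuming standard iproute2 flower
 * syntax):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */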
1716 static int fl_change(struct net *net, struct sk_buff *in_skb,
1717 		     struct tcf_proto *tp, unsigned long base,
1718 		     u32 handle, struct nlattr **tca,
1719 		     void **arg, bool ovr, bool rtnl_held,
1720 		     struct netlink_ext_ack *extack)
1721 {
1722 	struct cls_fl_head *head = fl_head_dereference(tp);
1723 	struct cls_fl_filter *fold = *arg;
1724 	struct cls_fl_filter *fnew;
1725 	struct fl_flow_mask *mask;
1726 	struct nlattr **tb;
1727 	bool in_ht;
1728 	int err;
1729 
1730 	if (!tca[TCA_OPTIONS]) {
1731 		err = -EINVAL;
1732 		goto errout_fold;
1733 	}
1734 
1735 	mask = kzalloc(sizeof(*mask), GFP_KERNEL);
1736 	if (!mask) {
1737 		err = -ENOBUFS;
1738 		goto errout_fold;
1739 	}
1740 
1741 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1742 	if (!tb) {
1743 		err = -ENOBUFS;
1744 		goto errout_mask_alloc;
1745 	}
1746 
1747 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1748 					  tca[TCA_OPTIONS], fl_policy, NULL);
1749 	if (err < 0)
1750 		goto errout_tb;
1751 
1752 	if (fold && handle && fold->handle != handle) {
1753 		err = -EINVAL;
1754 		goto errout_tb;
1755 	}
1756 
1757 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1758 	if (!fnew) {
1759 		err = -ENOBUFS;
1760 		goto errout_tb;
1761 	}
1762 	INIT_LIST_HEAD(&fnew->hw_list);
1763 	refcount_set(&fnew->refcnt, 1);
1764 
1765 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1766 	if (err < 0)
1767 		goto errout;
1768 
1769 	if (tb[TCA_FLOWER_FLAGS]) {
1770 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1771 
1772 		if (!tc_flags_valid(fnew->flags)) {
1773 			err = -EINVAL;
1774 			goto errout;
1775 		}
1776 	}
1777 
1778 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1779 			   tp->chain->tmplt_priv, rtnl_held, extack);
1780 	if (err)
1781 		goto errout;
1782 
1783 	err = fl_check_assign_mask(head, fnew, fold, mask);
1784 	if (err)
1785 		goto errout;
1786 
1787 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1788 	if (err)
1789 		goto errout_mask;
1790 
1791 	if (!tc_skip_hw(fnew->flags)) {
1792 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1793 		if (err)
1794 			goto errout_ht;
1795 	}
1796 
1797 	if (!tc_in_hw(fnew->flags))
1798 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1799 
1800 	spin_lock(&tp->lock);
1801 
1802 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
1803 	 * the proto again or create a new one, if necessary.
1804 	 */
1805 	if (tp->deleting) {
1806 		err = -EAGAIN;
1807 		goto errout_hw;
1808 	}
1809 
1810 	if (fold) {
1811 		/* Fold filter was deleted concurrently. Retry lookup. */
1812 		if (fold->deleted) {
1813 			err = -EAGAIN;
1814 			goto errout_hw;
1815 		}
1816 
1817 		fnew->handle = handle;
1818 
1819 		if (!in_ht) {
1820 			struct rhashtable_params params =
1821 				fnew->mask->filter_ht_params;
1822 
1823 			err = rhashtable_insert_fast(&fnew->mask->ht,
1824 						     &fnew->ht_node,
1825 						     params);
1826 			if (err)
1827 				goto errout_hw;
1828 			in_ht = true;
1829 		}
1830 
1831 		refcount_inc(&fnew->refcnt);
1832 		rhashtable_remove_fast(&fold->mask->ht,
1833 				       &fold->ht_node,
1834 				       fold->mask->filter_ht_params);
1835 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1836 		list_replace_rcu(&fold->list, &fnew->list);
1837 		fold->deleted = true;
1838 
1839 		spin_unlock(&tp->lock);
1840 
1841 		fl_mask_put(head, fold->mask);
1842 		if (!tc_skip_hw(fold->flags))
1843 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1844 		tcf_unbind_filter(tp, &fold->res);
1845 		/* The caller holds a reference to fold, so its refcnt is
1846 		 * always > 0 after this.
1847 		 */
1848 		refcount_dec(&fold->refcnt);
1849 		__fl_put(fold);
1850 	} else {
1851 		if (handle) {
1852 			/* User specified a handle that doesn't exist yet; allocate it */
1853 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1854 					    handle, GFP_ATOMIC);
1855 
1856 			/* A filter with the specified handle was concurrently
1857 			 * inserted after the initial check in cls_api. This is
1858 			 * not necessarily an error if NLM_F_EXCL is not set in
1859 			 * the message flags. Returning -EAGAIN will cause
1860 			 * cls_api to try to update the concurrently inserted
1861 			 */
1862 			if (err == -ENOSPC)
1863 				err = -EAGAIN;
1864 		} else {
1865 			handle = 1;
1866 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1867 					    INT_MAX, GFP_ATOMIC);
1868 		}
1869 		if (err)
1870 			goto errout_hw;
1871 
1872 		refcount_inc(&fnew->refcnt);
1873 		fnew->handle = handle;
1874 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1875 		spin_unlock(&tp->lock);
1876 	}
1877 
1878 	*arg = fnew;
1879 
1880 	kfree(tb);
1881 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1882 	return 0;
1883 
1884 errout_ht:
1885 	spin_lock(&tp->lock);
1886 errout_hw:
1887 	fnew->deleted = true;
1888 	spin_unlock(&tp->lock);
1889 	if (!tc_skip_hw(fnew->flags))
1890 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1891 	if (in_ht)
1892 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1893 				       fnew->mask->filter_ht_params);
1894 errout_mask:
1895 	fl_mask_put(head, fnew->mask);
1896 errout:
1897 	__fl_put(fnew);
1898 errout_tb:
1899 	kfree(tb);
1900 errout_mask_alloc:
1901 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1902 errout_fold:
1903 	if (fold)
1904 		__fl_put(fold);
1905 	return err;
1906 }
1907 
1908 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1909 		     bool rtnl_held, struct netlink_ext_ack *extack)
1910 {
1911 	struct cls_fl_head *head = fl_head_dereference(tp);
1912 	struct cls_fl_filter *f = arg;
1913 	bool last_on_mask;
1914 	int err = 0;
1915 
1916 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1917 	*last = list_empty(&head->masks);
1918 	__fl_put(f);
1919 
1920 	return err;
1921 }
1922 
1923 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1924 		    bool rtnl_held)
1925 {
1926 	struct cls_fl_head *head = fl_head_dereference(tp);
1927 	unsigned long id = arg->cookie, tmp;
1928 	struct cls_fl_filter *f;
1929 
1930 	arg->count = arg->skip;
1931 
1932 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1933 		/* don't return filters that are being deleted */
1934 		if (!refcount_inc_not_zero(&f->refcnt))
1935 			continue;
1936 		if (arg->fn(tp, f, arg) < 0) {
1937 			__fl_put(f);
1938 			arg->stop = 1;
1939 			break;
1940 		}
1941 		__fl_put(f);
1942 		arg->count++;
1943 	}
1944 	arg->cookie = id;
1945 }
1946 
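/* Return the next offloaded filter after @f (or the first one when @f is
 * NULL) whose refcount can still be taken, skipping already-deleted
 * filters when @add is true. The returned filter carries a reference;
 * the caller releases it with __fl_put().
 */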
1947 static struct cls_fl_filter *
1948 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1949 {
1950 	struct cls_fl_head *head = fl_head_dereference(tp);
1951 
1952 	spin_lock(&tp->lock);
1953 	if (list_empty(&head->hw_filters)) {
1954 		spin_unlock(&tp->lock);
1955 		return NULL;
1956 	}
1957 
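	/* No cursor yet: synthesize one from the list head itself;
	 * list_for_each_entry_continue() starts at the entry after it.
	 */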
1958 	if (!f)
1959 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1960 			       hw_list);
1961 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1962 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1963 			spin_unlock(&tp->lock);
1964 			return f;
1965 		}
1966 	}
1967 
1968 	spin_unlock(&tp->lock);
1969 	return NULL;
1970 }
1971 
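/* Replay every offloaded filter to a single callback, used when a block
 * binding changes. For each filter, a flow_rule is built from the mask,
 * masked key and actions; a failure to set up the actions is fatal only
 * for skip_sw filters, which have no software fallback.
 */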
1972 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1973 			void *cb_priv, struct netlink_ext_ack *extack)
1974 {
1975 	struct tcf_block *block = tp->chain->block;
1976 	struct flow_cls_offload cls_flower = {};
1977 	struct cls_fl_filter *f = NULL;
1978 	int err;
1979 
1980 	/* hw_filters list can only be changed by hw offload functions after
1981 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
1982 	 * iterating it.
1983 	 */
1984 	ASSERT_RTNL();
1985 
1986 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1987 		cls_flower.rule =
1988 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1989 		if (!cls_flower.rule) {
1990 			__fl_put(f);
1991 			return -ENOMEM;
1992 		}
1993 
1994 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1995 					   extack);
1996 		cls_flower.command = add ?
1997 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1998 		cls_flower.cookie = (unsigned long)f;
1999 		cls_flower.rule->match.dissector = &f->mask->dissector;
2000 		cls_flower.rule->match.mask = &f->mask->key;
2001 		cls_flower.rule->match.key = &f->mkey;
2002 
2003 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
2004 					   true);
2005 		if (err) {
2006 			kfree(cls_flower.rule);
2007 			if (tc_skip_sw(f->flags)) {
2008 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2009 				__fl_put(f);
2010 				return err;
2011 			}
2012 			goto next_flow;
2013 		}
2014 
2015 		cls_flower.classid = f->res.classid;
2016 
2017 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2018 					    TC_SETUP_CLSFLOWER, &cls_flower,
2019 					    cb_priv, &f->flags,
2020 					    &f->in_hw_count);
2021 		tc_cleanup_flow_action(&cls_flower.rule->action);
2022 		kfree(cls_flower.rule);
2023 
2024 		if (err) {
2025 			__fl_put(f);
2026 			return err;
2027 		}
2028 next_flow:
2029 		__fl_put(f);
2030 	}
2031 
2032 	return 0;
2033 }
2034 
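/* The hw_add/hw_del callbacks maintain head->hw_filters, the list that
 * fl_reoffload() walks. They are invoked by the offload infrastructure
 * under rtnl, with tp->lock taken here for the list update itself.
 */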
2035 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2036 {
2037 	struct flow_cls_offload *cls_flower = type_data;
2038 	struct cls_fl_filter *f =
2039 		(struct cls_fl_filter *)cls_flower->cookie;
2040 	struct cls_fl_head *head = fl_head_dereference(tp);
2041 
2042 	spin_lock(&tp->lock);
2043 	list_add(&f->hw_list, &head->hw_filters);
2044 	spin_unlock(&tp->lock);
2045 }
2046 
2047 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2048 {
2049 	struct flow_cls_offload *cls_flower = type_data;
2050 	struct cls_fl_filter *f =
2051 		(struct cls_fl_filter *)cls_flower->cookie;
2052 
2053 	spin_lock(&tp->lock);
2054 	if (!list_empty(&f->hw_list))
2055 		list_del_init(&f->hw_list);
2056 	spin_unlock(&tp->lock);
2057 }
2058 
2059 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2060 			      struct fl_flow_tmplt *tmplt)
2061 {
2062 	struct flow_cls_offload cls_flower = {};
2063 	struct tcf_block *block = chain->block;
2064 
2065 	cls_flower.rule = flow_rule_alloc(0);
2066 	if (!cls_flower.rule)
2067 		return -ENOMEM;
2068 
2069 	cls_flower.common.chain_index = chain->index;
2070 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2071 	cls_flower.cookie = (unsigned long)tmplt;
2072 	cls_flower.rule->match.dissector = &tmplt->dissector;
2073 	cls_flower.rule->match.mask = &tmplt->mask;
2074 	cls_flower.rule->match.key = &tmplt->dummy_key;
2075 
2076 	/* We don't care if any of the drivers fails to handle this
2077 	 * call. It serves merely as a hint to them.
2078 	 */
2079 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2080 	kfree(cls_flower.rule);
2081 
2082 	return 0;
2083 }
2084 
2085 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2086 				struct fl_flow_tmplt *tmplt)
2087 {
2088 	struct flow_cls_offload cls_flower = {};
2089 	struct tcf_block *block = chain->block;
2090 
2091 	cls_flower.common.chain_index = chain->index;
2092 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2093 	cls_flower.cookie = (unsigned long)tmplt;
2094 
2095 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2096 }
2097 
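/* Create a chain template: parse the key attributes into a dummy
 * key/mask pair, build a dissector for it, and offer the template to
 * offload drivers as a hint for later filter insertions.
 */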
2098 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2099 			     struct nlattr **tca,
2100 			     struct netlink_ext_ack *extack)
2101 {
2102 	struct fl_flow_tmplt *tmplt;
2103 	struct nlattr **tb;
2104 	int err;
2105 
2106 	if (!tca[TCA_OPTIONS])
2107 		return ERR_PTR(-EINVAL);
2108 
2109 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2110 	if (!tb)
2111 		return ERR_PTR(-ENOBUFS);
2112 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2113 					  tca[TCA_OPTIONS], fl_policy, NULL);
2114 	if (err)
2115 		goto errout_tb;
2116 
2117 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2118 	if (!tmplt) {
2119 		err = -ENOMEM;
2120 		goto errout_tb;
2121 	}
2122 	tmplt->chain = chain;
2123 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2124 	if (err)
2125 		goto errout_tmplt;
2126 
2127 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2128 
2129 	err = fl_hw_create_tmplt(chain, tmplt);
2130 	if (err)
2131 		goto errout_tmplt;
2132 
2133 	kfree(tb);
2134 	return tmplt;
2135 
2136 errout_tmplt:
2137 	kfree(tmplt);
2138 errout_tb:
2139 	kfree(tb);
2140 	return ERR_PTR(err);
2141 }
2142 
2143 static void fl_tmplt_destroy(void *tmplt_priv)
2144 {
2145 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2146 
2147 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2148 	kfree(tmplt);
2149 }
2150 
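/* Dump one key/mask pair. A field whose mask is all zeroes was never
 * matched on and is omitted entirely; TCA_FLOWER_UNSPEC as @mask_type
 * means the attribute has no separate mask counterpart (e.g. fields that
 * are exact-match only), so only the value is emitted.
 */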
2151 static int fl_dump_key_val(struct sk_buff *skb,
2152 			   void *val, int val_type,
2153 			   void *mask, int mask_type, int len)
2154 {
2155 	int err;
2156 
2157 	if (!memchr_inv(mask, 0, len))
2158 		return 0;
2159 	err = nla_put(skb, val_type, len, val);
2160 	if (err)
2161 		return err;
2162 	if (mask_type != TCA_FLOWER_UNSPEC) {
2163 		err = nla_put(skb, mask_type, len, mask);
2164 		if (err)
2165 			return err;
2166 	}
2167 	return 0;
2168 }
2169 
2170 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2171 				  struct fl_flow_key *mask)
2172 {
2173 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2174 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2175 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2176 			    sizeof(key->tp_range.tp_min.dst)) ||
2177 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2178 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2179 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2180 			    sizeof(key->tp_range.tp_max.dst)) ||
2181 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2182 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2183 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2184 			    sizeof(key->tp_range.tp_min.src)) ||
2185 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2186 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2187 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2188 			    sizeof(key->tp_range.tp_max.src)))
2189 		return -1;
2190 
2191 	return 0;
2192 }
2193 
2194 static int fl_dump_key_mpls(struct sk_buff *skb,
2195 			    struct flow_dissector_key_mpls *mpls_key,
2196 			    struct flow_dissector_key_mpls *mpls_mask)
2197 {
2198 	int err;
2199 
2200 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2201 		return 0;
2202 	if (mpls_mask->mpls_ttl) {
2203 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2204 				 mpls_key->mpls_ttl);
2205 		if (err)
2206 			return err;
2207 	}
2208 	if (mpls_mask->mpls_tc) {
2209 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2210 				 mpls_key->mpls_tc);
2211 		if (err)
2212 			return err;
2213 	}
2214 	if (mpls_mask->mpls_label) {
2215 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2216 				  mpls_key->mpls_label);
2217 		if (err)
2218 			return err;
2219 	}
2220 	if (mpls_mask->mpls_bos) {
2221 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2222 				 mpls_key->mpls_bos);
2223 		if (err)
2224 			return err;
2225 	}
2226 	return 0;
2227 }
2228 
2229 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2230 			  struct flow_dissector_key_ip *key,
2231 			  struct flow_dissector_key_ip *mask)
2232 {
2233 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2234 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2235 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2236 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2237 
2238 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2239 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2240 		return -1;
2241 
2242 	return 0;
2243 }
2244 
2245 static int fl_dump_key_vlan(struct sk_buff *skb,
2246 			    int vlan_id_key, int vlan_prio_key,
2247 			    struct flow_dissector_key_vlan *vlan_key,
2248 			    struct flow_dissector_key_vlan *vlan_mask)
2249 {
2250 	int err;
2251 
2252 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2253 		return 0;
2254 	if (vlan_mask->vlan_id) {
2255 		err = nla_put_u16(skb, vlan_id_key,
2256 				  vlan_key->vlan_id);
2257 		if (err)
2258 			return err;
2259 	}
2260 	if (vlan_mask->vlan_priority) {
2261 		err = nla_put_u8(skb, vlan_prio_key,
2262 				 vlan_key->vlan_priority);
2263 		if (err)
2264 			return err;
2265 	}
2266 	return 0;
2267 }
2268 
2269 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2270 			    u32 *flower_key, u32 *flower_mask,
2271 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2272 {
2273 	if (dissector_mask & dissector_flag_bit) {
2274 		*flower_mask |= flower_flag_bit;
2275 		if (dissector_key & dissector_flag_bit)
2276 			*flower_key |= flower_flag_bit;
2277 	}
2278 }
2279 
2280 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2281 {
2282 	u32 key, mask;
2283 	__be32 _key, _mask;
2284 	int err;
2285 
2286 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2287 		return 0;
2288 
2289 	key = 0;
2290 	mask = 0;
2291 
2292 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2293 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2294 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2295 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2296 			FLOW_DIS_FIRST_FRAG);
2297 
2298 	_key = cpu_to_be32(key);
2299 	_mask = cpu_to_be32(mask);
2300 
2301 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, sizeof(_key), &_key);
2302 	if (err)
2303 		return err;
2304 
2305 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, sizeof(_mask), &_mask);
2306 }
2307 
2308 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2309 				  struct flow_dissector_key_enc_opts *enc_opts)
2310 {
2311 	struct geneve_opt *opt;
2312 	struct nlattr *nest;
2313 	int opt_off = 0;
2314 
2315 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2316 	if (!nest)
2317 		goto nla_put_failure;
2318 
2319 	while (enc_opts->len > opt_off) {
2320 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2321 
2322 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2323 				 opt->opt_class))
2324 			goto nla_put_failure;
2325 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2326 			       opt->type))
2327 			goto nla_put_failure;
2328 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2329 			    opt->length * 4, opt->opt_data))
2330 			goto nla_put_failure;
2331 
2332 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2333 	}
2334 	nla_nest_end(skb, nest);
2335 	return 0;
2336 
2337 nla_put_failure:
2338 	nla_nest_cancel(skb, nest);
2339 	return -EMSGSIZE;
2340 }
2341 
2342 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2343 				 struct flow_dissector_key_enc_opts *enc_opts)
2344 {
2345 	struct vxlan_metadata *md;
2346 	struct nlattr *nest;
2347 
2348 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2349 	if (!nest)
2350 		goto nla_put_failure;
2351 
2352 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2353 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2354 		goto nla_put_failure;
2355 
2356 	nla_nest_end(skb, nest);
2357 	return 0;
2358 
2359 nla_put_failure:
2360 	nla_nest_cancel(skb, nest);
2361 	return -EMSGSIZE;
2362 }
2363 
2364 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2365 				  struct flow_dissector_key_enc_opts *enc_opts)
2366 {
2367 	struct erspan_metadata *md;
2368 	struct nlattr *nest;
2369 
2370 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2371 	if (!nest)
2372 		goto nla_put_failure;
2373 
2374 	md = (struct erspan_metadata *)&enc_opts->data[0];
2375 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2376 		goto nla_put_failure;
2377 
2378 	if (md->version == 1 &&
2379 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2380 		goto nla_put_failure;
2381 
2382 	if (md->version == 2 &&
2383 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2384 			md->u.md2.dir) ||
2385 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2386 			get_hwid(&md->u.md2))))
2387 		goto nla_put_failure;
2388 
2389 	nla_nest_end(skb, nest);
2390 	return 0;
2391 
2392 nla_put_failure:
2393 	nla_nest_cancel(skb, nest);
2394 	return -EMSGSIZE;
2395 }
2396 
2397 static int fl_dump_key_ct(struct sk_buff *skb,
2398 			  struct flow_dissector_key_ct *key,
2399 			  struct flow_dissector_key_ct *mask)
2400 {
2401 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2402 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2403 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2404 			    sizeof(key->ct_state)))
2405 		goto nla_put_failure;
2406 
2407 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2408 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2409 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2410 			    sizeof(key->ct_zone)))
2411 		goto nla_put_failure;
2412 
2413 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2414 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2415 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2416 			    sizeof(key->ct_mark)))
2417 		goto nla_put_failure;
2418 
2419 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2420 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2421 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2422 			    sizeof(key->ct_labels)))
2423 		goto nla_put_failure;
2424 
2425 	return 0;
2426 
2427 nla_put_failure:
2428 	return -EMSGSIZE;
2429 }
2430 
2431 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2432 			       struct flow_dissector_key_enc_opts *enc_opts)
2433 {
2434 	struct nlattr *nest;
2435 	int err;
2436 
2437 	if (!enc_opts->len)
2438 		return 0;
2439 
2440 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2441 	if (!nest)
2442 		goto nla_put_failure;
2443 
2444 	switch (enc_opts->dst_opt_type) {
2445 	case TUNNEL_GENEVE_OPT:
2446 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2447 		if (err)
2448 			goto nla_put_failure;
2449 		break;
2450 	case TUNNEL_VXLAN_OPT:
2451 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2452 		if (err)
2453 			goto nla_put_failure;
2454 		break;
2455 	case TUNNEL_ERSPAN_OPT:
2456 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2457 		if (err)
2458 			goto nla_put_failure;
2459 		break;
2460 	default:
2461 		goto nla_put_failure;
2462 	}
2463 	nla_nest_end(skb, nest);
2464 	return 0;
2465 
2466 nla_put_failure:
2467 	nla_nest_cancel(skb, nest);
2468 	return -EMSGSIZE;
2469 }
2470 
2471 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2472 			       struct flow_dissector_key_enc_opts *key_opts,
2473 			       struct flow_dissector_key_enc_opts *msk_opts)
2474 {
2475 	int err;
2476 
2477 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2478 	if (err)
2479 		return err;
2480 
2481 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2482 }
2483 
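/* Dump the full key/mask of a filter (or template) back to userspace.
 * The ordering roughly mirrors fl_set_key(): L2 first, then VLAN tags,
 * L3, L4 and the encapsulation/tunnel fields, emitting only what the
 * mask actually covers.
 */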
2484 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2485 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2486 {
2487 	if (mask->meta.ingress_ifindex) {
2488 		struct net_device *dev;
2489 
2490 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2491 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2492 			goto nla_put_failure;
2493 	}
2494 
2495 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2496 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2497 			    sizeof(key->eth.dst)) ||
2498 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2499 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2500 			    sizeof(key->eth.src)) ||
2501 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2502 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2503 			    sizeof(key->basic.n_proto)))
2504 		goto nla_put_failure;
2505 
2506 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2507 		goto nla_put_failure;
2508 
2509 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2510 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2511 		goto nla_put_failure;
2512 
2513 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2514 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2515 			     &key->cvlan, &mask->cvlan) ||
2516 	    (mask->cvlan.vlan_tpid &&
2517 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2518 			  key->cvlan.vlan_tpid)))
2519 		goto nla_put_failure;
2520 
2521 	if (mask->basic.n_proto) {
2522 		if (mask->cvlan.vlan_tpid) {
2523 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2524 					 key->basic.n_proto))
2525 				goto nla_put_failure;
2526 		} else if (mask->vlan.vlan_tpid) {
2527 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2528 					 key->basic.n_proto))
2529 				goto nla_put_failure;
2530 		}
2531 	}
2532 
2533 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2534 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2535 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2536 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2537 			    sizeof(key->basic.ip_proto)) ||
2538 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2539 		goto nla_put_failure;
2540 
2541 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2542 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2543 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2544 			     sizeof(key->ipv4.src)) ||
2545 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2546 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2547 			     sizeof(key->ipv4.dst))))
2548 		goto nla_put_failure;
2549 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2550 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2551 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2552 				  sizeof(key->ipv6.src)) ||
2553 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2554 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2555 				  sizeof(key->ipv6.dst))))
2556 		goto nla_put_failure;
2557 
2558 	if (key->basic.ip_proto == IPPROTO_TCP &&
2559 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2560 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2561 			     sizeof(key->tp.src)) ||
2562 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2563 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2564 			     sizeof(key->tp.dst)) ||
2565 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2566 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2567 			     sizeof(key->tcp.flags))))
2568 		goto nla_put_failure;
2569 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2570 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2571 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2572 				  sizeof(key->tp.src)) ||
2573 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2574 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2575 				  sizeof(key->tp.dst))))
2576 		goto nla_put_failure;
2577 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2578 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2579 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2580 				  sizeof(key->tp.src)) ||
2581 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2582 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2583 				  sizeof(key->tp.dst))))
2584 		goto nla_put_failure;
2585 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2586 		 key->basic.ip_proto == IPPROTO_ICMP &&
2587 		 (fl_dump_key_val(skb, &key->icmp.type,
2588 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2589 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2590 				  sizeof(key->icmp.type)) ||
2591 		  fl_dump_key_val(skb, &key->icmp.code,
2592 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2593 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2594 				  sizeof(key->icmp.code))))
2595 		goto nla_put_failure;
2596 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2597 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2598 		 (fl_dump_key_val(skb, &key->icmp.type,
2599 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2600 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2601 				  sizeof(key->icmp.type)) ||
2602 		  fl_dump_key_val(skb, &key->icmp.code,
2603 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2604 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2605 				  sizeof(key->icmp.code))))
2606 		goto nla_put_failure;
2607 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2608 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2609 		 (fl_dump_key_val(skb, &key->arp.sip,
2610 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2611 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2612 				  sizeof(key->arp.sip)) ||
2613 		  fl_dump_key_val(skb, &key->arp.tip,
2614 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2615 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2616 				  sizeof(key->arp.tip)) ||
2617 		  fl_dump_key_val(skb, &key->arp.op,
2618 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2619 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2620 				  sizeof(key->arp.op)) ||
2621 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2622 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2623 				  sizeof(key->arp.sha)) ||
2624 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2625 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2626 				  sizeof(key->arp.tha))))
2627 		goto nla_put_failure;
2628 
2629 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2630 	     key->basic.ip_proto == IPPROTO_UDP ||
2631 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2632 	     fl_dump_key_port_range(skb, key, mask))
2633 		goto nla_put_failure;
2634 
2635 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2636 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2637 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2638 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2639 			    sizeof(key->enc_ipv4.src)) ||
2640 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2641 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2642 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2643 			     sizeof(key->enc_ipv4.dst))))
2644 		goto nla_put_failure;
2645 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2646 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2647 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2648 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2649 			    sizeof(key->enc_ipv6.src)) ||
2650 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2651 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2652 				 &mask->enc_ipv6.dst,
2653 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2654 			    sizeof(key->enc_ipv6.dst))))
2655 		goto nla_put_failure;
2656 
2657 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2658 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2659 			    sizeof(key->enc_key_id)) ||
2660 	    fl_dump_key_val(skb, &key->enc_tp.src,
2661 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2662 			    &mask->enc_tp.src,
2663 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2664 			    sizeof(key->enc_tp.src)) ||
2665 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2666 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2667 			    &mask->enc_tp.dst,
2668 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2669 			    sizeof(key->enc_tp.dst)) ||
2670 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2671 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2672 		goto nla_put_failure;
2673 
2674 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2675 		goto nla_put_failure;
2676 
2677 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2678 		goto nla_put_failure;
2679 
2680 	return 0;
2681 
2682 nla_put_failure:
2683 	return -EMSGSIZE;
2684 }
2685 
2686 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2687 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2688 {
2689 	struct cls_fl_filter *f = fh;
2690 	struct nlattr *nest;
2691 	struct fl_flow_key *key, *mask;
2692 	bool skip_hw;
2693 
2694 	if (!f)
2695 		return skb->len;
2696 
2697 	t->tcm_handle = f->handle;
2698 
2699 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2700 	if (!nest)
2701 		goto nla_put_failure;
2702 
2703 	spin_lock(&tp->lock);
2704 
2705 	if (f->res.classid &&
2706 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2707 		goto nla_put_failure_locked;
2708 
2709 	key = &f->key;
2710 	mask = &f->mask->key;
2711 	skip_hw = tc_skip_hw(f->flags);
2712 
2713 	if (fl_dump_key(skb, net, key, mask))
2714 		goto nla_put_failure_locked;
2715 
2716 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2717 		goto nla_put_failure_locked;
2718 
2719 	spin_unlock(&tp->lock);
2720 
2721 	if (!skip_hw)
2722 		fl_hw_update_stats(tp, f, rtnl_held);
2723 
2724 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2725 		goto nla_put_failure;
2726 
2727 	if (tcf_exts_dump(skb, &f->exts))
2728 		goto nla_put_failure;
2729 
2730 	nla_nest_end(skb, nest);
2731 
2732 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2733 		goto nla_put_failure;
2734 
2735 	return skb->len;
2736 
2737 nla_put_failure_locked:
2738 	spin_unlock(&tp->lock);
2739 nla_put_failure:
2740 	nla_nest_cancel(skb, nest);
2741 	return -1;
2742 }
2743 
2744 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2745 {
2746 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2747 	struct fl_flow_key *key, *mask;
2748 	struct nlattr *nest;
2749 
2750 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2751 	if (!nest)
2752 		goto nla_put_failure;
2753 
2754 	key = &tmplt->dummy_key;
2755 	mask = &tmplt->mask;
2756 
2757 	if (fl_dump_key(skb, net, key, mask))
2758 		goto nla_put_failure;
2759 
2760 	nla_nest_end(skb, nest);
2761 
2762 	return skb->len;
2763 
2764 nla_put_failure:
2765 	nla_nest_cancel(skb, nest);
2766 	return -EMSGSIZE;
2767 }
2768 
2769 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
2770 			  unsigned long base)
2771 {
2772 	struct cls_fl_filter *f = fh;
2773 
2774 	if (f && f->res.classid == classid) {
2775 		if (cl)
2776 			__tcf_bind_filter(q, &f->res, base);
2777 		else
2778 			__tcf_unbind_filter(q, &f->res);
2779 	}
2780 }
2781 
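/* Invoked via the ->delete_empty op so cls_api can decide whether the
 * whole tcf_proto can go away. Setting tp->deleting under tp->lock makes
 * a concurrent fl_change() fail with -EAGAIN instead of inserting into a
 * dying proto.
 */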
2782 static bool fl_delete_empty(struct tcf_proto *tp)
2783 {
2784 	struct cls_fl_head *head = fl_head_dereference(tp);
2785 
2786 	spin_lock(&tp->lock);
2787 	tp->deleting = idr_is_empty(&head->handle_idr);
2788 	spin_unlock(&tp->lock);
2789 
2790 	return tp->deleting;
2791 }
2792 
2793 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2794 	.kind		= "flower",
2795 	.classify	= fl_classify,
2796 	.init		= fl_init,
2797 	.destroy	= fl_destroy,
2798 	.get		= fl_get,
2799 	.put		= fl_put,
2800 	.change		= fl_change,
2801 	.delete		= fl_delete,
2802 	.delete_empty	= fl_delete_empty,
2803 	.walk		= fl_walk,
2804 	.reoffload	= fl_reoffload,
2805 	.hw_add		= fl_hw_add,
2806 	.hw_del		= fl_hw_del,
2807 	.dump		= fl_dump,
2808 	.bind_class	= fl_bind_class,
2809 	.tmplt_create	= fl_tmplt_create,
2810 	.tmplt_destroy	= fl_tmplt_destroy,
2811 	.tmplt_dump	= fl_tmplt_dump,
2812 	.owner		= THIS_MODULE,
2813 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2814 };
2815 
2816 static int __init cls_fl_init(void)
2817 {
2818 	return register_tcf_proto_ops(&cls_fl_ops);
2819 }
2820 
2821 static void __exit cls_fl_exit(void)
2822 {
2823 	unregister_tcf_proto_ops(&cls_fl_ops);
2824 }
2825 
2826 module_init(cls_fl_init);
2827 module_exit(cls_fl_exit);
2828 
2829 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2830 MODULE_DESCRIPTION("Flower classifier");
2831 MODULE_LICENSE("GPL v2");
2832