// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
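
/* Why the __aligned() above matters (illustrative sketch, not original
 * source): with BITS_PER_LONG == 64 the key is aligned and padded to a
 * multiple of 8 bytes, so fl_set_masked_key() below can walk it one long
 * at a time:
 *
 *	masked[i] = key[i] & mask[i];	// 8 bytes per iteration
 *
 * instead of masking and comparing byte by byte.
 */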

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* The flower classifier is unlocked, which means that its reference
	 * counter can be changed concurrently without any external
	 * synchronization. Use an atomic reference counter to be
	 * concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}
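
/* Illustration (hypothetical numbers): on a 64-bit machine, if the only
 * non-zero mask bytes sit at offsets 17..21 inside struct fl_flow_key, the
 * loops above find first == 17 and last == 21, and the range becomes
 * [rounddown(17, 8), roundup(22, 8)) == [16, 24). All later key copies,
 * masking loops and hashtable lookups then touch only that 8-byte window
 * instead of the whole key.
 */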

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}
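
/* Sketch of the effect (offsets made up): if the mask covers only the
 * 8-byte window starting at offset 16, the loop above computes
 *
 *	mkey[16..23] = key[16..23] & mask[16..23]
 *
 * and leaves all bytes outside [range.start, range.end) untouched; callers
 * are expected to have cleared that window first, e.g. via
 * fl_clear_masked_range() as fl_classify() does.
 */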

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_min.dst);
	max_mask = ntohs(filter->mask->key.tp_max.dst);
	min_val = ntohs(filter->key.tp_min.dst);
	max_val = ntohs(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (ntohs(key->tp.dst) < min_val ||
		    ntohs(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	u16 min_mask, max_mask, min_val, max_val;

	min_mask = ntohs(filter->mask->key.tp_min.src);
	max_mask = ntohs(filter->mask->key.tp_max.src);
	min_val = ntohs(filter->key.tp_min.src);
	max_val = ntohs(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (ntohs(key->tp.src) < min_val ||
		    ntohs(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}
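
/* Lookup is two-level: fl_classify() walks the list of masks and, for each
 * mask, hashes the masked key into that mask's own table. Masks that match
 * on a destination/source port *range* (TCA_FLOWER_MASK_FLAGS_RANGE) cannot
 * be resolved by hashing alone, so fl_lookup_range() first scans the mask's
 * filter list for a filter whose range contains the packet's port, fills the
 * min/max fields of the masked key from that filter, and only then does the
 * hash lookup.
 */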

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};
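
/* The table above translates conntrack state (skb->_nfct) into the
 * TCA_FLOWER_KEY_CT_FLAGS_* bits that filters match on. For example, a
 * reply packet of an established connection (IP_CT_ESTABLISHED_REPLY) is
 * presented to classification as TRACKED | ESTABLISHED, the same as the
 * original direction; packets with no conntrack entry leave the ct key
 * zeroed.
 */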

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}
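
/* Usage sketch (hypothetical device and addresses): a filter installed with
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.0.2.1 ip_proto tcp dst_port 80 \
 *		action drop
 *
 * ends up as one cls_fl_filter under one fl_flow_mask; fl_classify() above
 * dissects each skb once per mask, masks the result, and looks the masked
 * key up in that mask's hashtable. Filters with the skip_sw flag set are
 * ignored here and only matched in hardware.
 */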

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* The flower classifier only changes the root pointer during init and
	 * destroy. Users must obtain a reference to the tcf_proto instance
	 * before calling its API, so tp->root is protected from a concurrent
	 * call to fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
			    &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
				   rtnl_held);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
	tc_cleanup_flow_action(&cls_flower.rule->action);
	kfree(cls_flower.rule);

	if (err) {
		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
		return err;
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
			 rtnl_held);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);
}

static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
						       .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}
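
/* Behaviour sketch (hypothetical attribute pair such as
 * TCA_FLOWER_KEY_IPV4_DST / TCA_FLOWER_KEY_IPV4_DST_MASK):
 *
 *	value present, mask absent  -> mask = all ones (exact match)
 *	value present, mask present -> mask copied from the attribute
 *	value absent                -> key and mask left untouched (wildcard)
 *
 * Passing TCA_FLOWER_UNSPEC as mask_type forces the exact-match case.
 */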

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     ntohs(key->tp_max.dst) <= ntohs(key->tp_min.dst)) ||
	     (mask->tp_min.src && mask->tp_max.src &&
	      ntohs(key->tp_max.src) <= ntohs(key->tp_min.src)))
		return -EINVAL;

	return 0;
}
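
/* Example (made-up ports): "dst_port 1000-2000" sets tp_min.dst to
 * htons(1000) and tp_max.dst to htons(2000) with all-ones masks; the check
 * above rejects an empty or inverted range such as 2000-1000. The comparison
 * must happen in host byte order, since big-endian values do not compare
 * numerically on little-endian machines.
 */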

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key  = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			    int depth, int option_len,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
	struct vxlan_metadata *md;
	int err;

	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
			       vxlan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP])
		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);

	return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
	struct erspan_metadata *md;
	int err;

	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
	memset(md, 0xff, sizeof(*md));
	md->version = 1;

	if (!depth)
		return sizeof(*md);

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
			       erspan_opt_policy, extack);
	if (err < 0)
		return err;

	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
		return -EINVAL;
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

	if (md->version == 1) {
		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
			md->u.index = nla_get_be32(nla);
		}
	} else if (md->version == 2) {
		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
			return -EINVAL;
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
			md->u.md2.dir = nla_get_u8(nla);
		}
		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
			set_hwid(&md->u.md2, nla_get_u8(nla));
		}
	} else {
		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
		return -EINVAL;
	}

	return sizeof(*md);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			if (key->enc_opts.dst_opt_type &&
			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are not aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_key, key,
						      key_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
						      msk_depth, option_len,
						      extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are not aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
			if (key->enc_opts.dst_opt_type) {
				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
				return -EINVAL;
			}
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask are not aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);

		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}
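
/* Example (invented offsets): if fl_mask_update_range() computed the range
 * [16, 24), the per-mask table hashes only those 8 bytes of cls_fl_filter
 * ->mkey: key_offset becomes offsetof(struct cls_fl_filter, mkey) + 16 and
 * key_len becomes 8. Each mask therefore gets its own hashtable tuned to
 * exactly the bytes it matches on.
 */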

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);
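
/* Expansion sketch: for a mask with a non-zero eth member,
 *
 *	FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
 *
 * appends { FLOW_DISSECTOR_KEY_ETH_ADDRS, offsetof(struct fl_flow_key, eth) }
 * to the keys[] array, so fl_init_dissector() below only asks the flow
 * dissector for the pieces this mask can possibly match on.
 */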
1495 
1496 static void fl_init_dissector(struct flow_dissector *dissector,
1497 			      struct fl_flow_key *mask)
1498 {
1499 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1500 	size_t cnt = 0;
1501 
1502 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1503 			     FLOW_DISSECTOR_KEY_META, meta);
1504 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1505 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1506 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1507 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1508 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1509 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1510 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1511 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1512 	if (FL_KEY_IS_MASKED(mask, tp) ||
1513 	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1514 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1515 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1516 			     FLOW_DISSECTOR_KEY_IP, ip);
1517 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1518 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1519 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1520 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1521 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1522 			     FLOW_DISSECTOR_KEY_ARP, arp);
1523 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1524 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1525 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1526 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1527 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1528 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1529 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1530 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1531 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1532 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1533 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1534 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1535 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1536 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1537 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1538 			   enc_control);
1539 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1540 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1541 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1542 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1543 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1544 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1545 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1546 			     FLOW_DISSECTOR_KEY_CT, ct);
1547 
1548 	skb_flow_dissector_init(dissector, keys, cnt);
1549 }
1550 
1551 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1552 					       struct fl_flow_mask *mask)
1553 {
1554 	struct fl_flow_mask *newmask;
1555 	int err;
1556 
1557 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1558 	if (!newmask)
1559 		return ERR_PTR(-ENOMEM);
1560 
1561 	fl_mask_copy(newmask, mask);
1562 
1563 	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1564 	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
1565 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1566 
1567 	err = fl_init_mask_hashtable(newmask);
1568 	if (err)
1569 		goto errout_free;
1570 
1571 	fl_init_dissector(&newmask->dissector, &newmask->key);
1572 
1573 	INIT_LIST_HEAD_RCU(&newmask->filters);
1574 
1575 	refcount_set(&newmask->refcnt, 1);
1576 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1577 				      &newmask->ht_node, mask_ht_params);
1578 	if (err)
1579 		goto errout_destroy;
1580 
1581 	spin_lock(&head->masks_lock);
1582 	list_add_tail_rcu(&newmask->list, &head->masks);
1583 	spin_unlock(&head->masks_lock);
1584 
1585 	return newmask;
1586 
1587 errout_destroy:
1588 	rhashtable_destroy(&newmask->ht);
1589 errout_free:
1590 	kfree(newmask);
1591 
1592 	return ERR_PTR(err);
1593 }
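/* Masks are deduplicated: filters with an identical mask share one
 * fl_flow_mask (and thus one dissector and one filter hash table), with
 * refcnt counting the users. rhashtable_replace_fast() atomically swaps
 * the temporary node inserted by fl_check_assign_mask() below for the
 * fully initialized copy, so concurrent lookups see either the
 * temporary node (refcnt zero, hence -EAGAIN) or a usable mask.
 */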
1594 
1595 static int fl_check_assign_mask(struct cls_fl_head *head,
1596 				struct cls_fl_filter *fnew,
1597 				struct cls_fl_filter *fold,
1598 				struct fl_flow_mask *mask)
1599 {
1600 	struct fl_flow_mask *newmask;
1601 	int ret = 0;
1602 
1603 	rcu_read_lock();
1604 
1605 	/* Insert the mask as a temporary node to prevent concurrent creation
1606 	 * of a mask with the same key. Any concurrent lookup with the same key
1607 	 * will return -EAGAIN because the mask's refcnt is still zero.
1608 	 */
1609 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1610 						       &mask->ht_node,
1611 						       mask_ht_params);
1612 	if (!fnew->mask) {
1613 		rcu_read_unlock();
1614 
1615 		if (fold) {
1616 			ret = -EINVAL;
1617 			goto errout_cleanup;
1618 		}
1619 
1620 		newmask = fl_create_new_mask(head, mask);
1621 		if (IS_ERR(newmask)) {
1622 			ret = PTR_ERR(newmask);
1623 			goto errout_cleanup;
1624 		}
1625 
1626 		fnew->mask = newmask;
1627 		return 0;
1628 	} else if (IS_ERR(fnew->mask)) {
1629 		ret = PTR_ERR(fnew->mask);
1630 	} else if (fold && fold->mask != fnew->mask) {
1631 		ret = -EINVAL;
1632 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1633 		/* Mask was deleted concurrently, try again */
1634 		ret = -EAGAIN;
1635 	}
1636 	rcu_read_unlock();
1637 	return ret;
1638 
1639 errout_cleanup:
1640 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1641 			       mask_ht_params);
1642 	return ret;
1643 }
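/* Three outcomes are possible above: the temporary node won the race
 * and a permanent mask is created (refcnt == 1), an existing mask with
 * the same key is reused (its refcnt is incremented), or the existing
 * mask is concurrently being destroyed (refcnt already zero) and
 * -EAGAIN tells the caller to retry the whole operation.
 */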
1644 
1645 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1646 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1647 			unsigned long base, struct nlattr **tb,
1648 			struct nlattr *est, bool ovr,
1649 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1650 			struct netlink_ext_ack *extack)
1651 {
1652 	int err;
1653 
1654 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1655 				extack);
1656 	if (err < 0)
1657 		return err;
1658 
1659 	if (tb[TCA_FLOWER_CLASSID]) {
1660 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1661 		if (!rtnl_held)
1662 			rtnl_lock();
1663 		tcf_bind_filter(tp, &f->res, base);
1664 		if (!rtnl_held)
1665 			rtnl_unlock();
1666 	}
1667 
1668 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1669 	if (err)
1670 		return err;
1671 
1672 	fl_mask_update_range(mask);
1673 	fl_set_masked_key(&f->mkey, &f->key, mask);
1674 
1675 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1676 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1677 		return -EINVAL;
1678 	}
1679 
1680 	return 0;
1681 }
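/* fl_set_masked_key() (defined earlier in this file) computes
 * mkey = key & mask over the masked range of the aligned fl_flow_key,
 * one long at a time, roughly:
 *
 *	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
 *		*lmkey++ = *lkey++ & *lmask++;
 *
 * so hash table lookups compare only the bits the filter matches on.
 */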
1682 
1683 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1684 			       struct cls_fl_filter *fold,
1685 			       bool *in_ht)
1686 {
1687 	struct fl_flow_mask *mask = fnew->mask;
1688 	int err;
1689 
1690 	err = rhashtable_lookup_insert_fast(&mask->ht,
1691 					    &fnew->ht_node,
1692 					    mask->filter_ht_params);
1693 	if (err) {
1694 		*in_ht = false;
1695 		/* It is okay if a filter with the same key already exists
1696 		 * when overwriting.
1697 		 */
1698 		return fold && err == -EEXIST ? 0 : err;
1699 	}
1700 
1701 	*in_ht = true;
1702 	return 0;
1703 }
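/* -EEXIST is tolerated only when replacing an existing filter (fold is
 * set), in which case the colliding entry is expected to be fold
 * itself. *in_ht records whether fnew still has to be inserted under
 * tp->lock later in fl_change().
 */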
1704 
1705 static int fl_change(struct net *net, struct sk_buff *in_skb,
1706 		     struct tcf_proto *tp, unsigned long base,
1707 		     u32 handle, struct nlattr **tca,
1708 		     void **arg, bool ovr, bool rtnl_held,
1709 		     struct netlink_ext_ack *extack)
1710 {
1711 	struct cls_fl_head *head = fl_head_dereference(tp);
1712 	struct cls_fl_filter *fold = *arg;
1713 	struct cls_fl_filter *fnew;
1714 	struct fl_flow_mask *mask;
1715 	struct nlattr **tb;
1716 	bool in_ht;
1717 	int err;
1718 
1719 	if (!tca[TCA_OPTIONS]) {
1720 		err = -EINVAL;
1721 		goto errout_fold;
1722 	}
1723 
1724 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1725 	if (!mask) {
1726 		err = -ENOBUFS;
1727 		goto errout_fold;
1728 	}
1729 
1730 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1731 	if (!tb) {
1732 		err = -ENOBUFS;
1733 		goto errout_mask_alloc;
1734 	}
1735 
1736 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1737 					  tca[TCA_OPTIONS], fl_policy, NULL);
1738 	if (err < 0)
1739 		goto errout_tb;
1740 
1741 	if (fold && handle && fold->handle != handle) {
1742 		err = -EINVAL;
1743 		goto errout_tb;
1744 	}
1745 
1746 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1747 	if (!fnew) {
1748 		err = -ENOBUFS;
1749 		goto errout_tb;
1750 	}
1751 	INIT_LIST_HEAD(&fnew->hw_list);
1752 	refcount_set(&fnew->refcnt, 1);
1753 
1754 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1755 	if (err < 0)
1756 		goto errout;
1757 
1758 	if (tb[TCA_FLOWER_FLAGS]) {
1759 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1760 
1761 		if (!tc_flags_valid(fnew->flags)) {
1762 			err = -EINVAL;
1763 			goto errout;
1764 		}
1765 	}
1766 
1767 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1768 			   tp->chain->tmplt_priv, rtnl_held, extack);
1769 	if (err)
1770 		goto errout;
1771 
1772 	err = fl_check_assign_mask(head, fnew, fold, mask);
1773 	if (err)
1774 		goto errout;
1775 
1776 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1777 	if (err)
1778 		goto errout_mask;
1779 
1780 	if (!tc_skip_hw(fnew->flags)) {
1781 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1782 		if (err)
1783 			goto errout_ht;
1784 	}
1785 
1786 	if (!tc_in_hw(fnew->flags))
1787 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1788 
1789 	spin_lock(&tp->lock);
1790 
1791 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
1792 	 * up the proto again or create a new one, if necessary.
1793 	 */
1794 	if (tp->deleting) {
1795 		err = -EAGAIN;
1796 		goto errout_hw;
1797 	}
1798 
1799 	if (fold) {
1800 		/* Fold filter was deleted concurrently. Retry lookup. */
1801 		if (fold->deleted) {
1802 			err = -EAGAIN;
1803 			goto errout_hw;
1804 		}
1805 
1806 		fnew->handle = handle;
1807 
1808 		if (!in_ht) {
1809 			struct rhashtable_params params =
1810 				fnew->mask->filter_ht_params;
1811 
1812 			err = rhashtable_insert_fast(&fnew->mask->ht,
1813 						     &fnew->ht_node,
1814 						     params);
1815 			if (err)
1816 				goto errout_hw;
1817 			in_ht = true;
1818 		}
1819 
1820 		refcount_inc(&fnew->refcnt);
1821 		rhashtable_remove_fast(&fold->mask->ht,
1822 				       &fold->ht_node,
1823 				       fold->mask->filter_ht_params);
1824 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1825 		list_replace_rcu(&fold->list, &fnew->list);
1826 		fold->deleted = true;
1827 
1828 		spin_unlock(&tp->lock);
1829 
1830 		fl_mask_put(head, fold->mask);
1831 		if (!tc_skip_hw(fold->flags))
1832 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1833 		tcf_unbind_filter(tp, &fold->res);
1834 		/* The caller holds a reference to fold, so its refcnt is
1835 		 * always > 0 after this.
1836 		 */
1837 		refcount_dec(&fold->refcnt);
1838 		__fl_put(fold);
1839 	} else {
1840 		if (handle) {
1841 			/* The user specified a handle; try to allocate exactly that id. */
1842 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1843 					    handle, GFP_ATOMIC);
1844 
1845 			/* A filter with the specified handle was concurrently
1846 			 * inserted after the initial check in cls_api. This is
1847 			 * not necessarily an error if NLM_F_EXCL is not set in
1848 			 * the message flags. Returning -EAGAIN will cause
1849 			 * cls_api to try to update that rule instead.
1850 			 */
1851 			if (err == -ENOSPC)
1852 				err = -EAGAIN;
1853 		} else {
1854 			handle = 1;
1855 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1856 					    INT_MAX, GFP_ATOMIC);
1857 		}
1858 		if (err)
1859 			goto errout_hw;
1860 
1861 		refcount_inc(&fnew->refcnt);
1862 		fnew->handle = handle;
1863 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1864 		spin_unlock(&tp->lock);
1865 	}
1866 
1867 	*arg = fnew;
1868 
1869 	kfree(tb);
1870 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1871 	return 0;
1872 
1873 errout_ht:
1874 	spin_lock(&tp->lock);
1875 errout_hw:
1876 	fnew->deleted = true;
1877 	spin_unlock(&tp->lock);
1878 	if (!tc_skip_hw(fnew->flags))
1879 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1880 	if (in_ht)
1881 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1882 				       fnew->mask->filter_ht_params);
1883 errout_mask:
1884 	fl_mask_put(head, fnew->mask);
1885 errout:
1886 	__fl_put(fnew);
1887 errout_tb:
1888 	kfree(tb);
1889 errout_mask_alloc:
1890 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
1891 errout_fold:
1892 	if (fold)
1893 		__fl_put(fold);
1894 	return err;
1895 }
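/* An illustrative iproute2 command that exercises this path (device and
 * match values are examples only):
 *
 *	tc filter add dev eth0 ingress protocol ip \
 *		flower dst_ip 192.0.2.1 ip_proto tcp dst_port 80 \
 *		action drop
 *
 * Re-issuing it as "tc filter replace" with the same priority and
 * handle re-enters fl_change() with fold set to the existing filter.
 */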
1896 
1897 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1898 		     bool rtnl_held, struct netlink_ext_ack *extack)
1899 {
1900 	struct cls_fl_head *head = fl_head_dereference(tp);
1901 	struct cls_fl_filter *f = arg;
1902 	bool last_on_mask;
1903 	int err = 0;
1904 
1905 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1906 	*last = list_empty(&head->masks);
1907 	__fl_put(f);
1908 
1909 	return err;
1910 }
1911 
1912 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1913 		    bool rtnl_held)
1914 {
1915 	struct cls_fl_head *head = fl_head_dereference(tp);
1916 	unsigned long id = arg->cookie, tmp;
1917 	struct cls_fl_filter *f;
1918 
1919 	arg->count = arg->skip;
1920 
1921 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
1922 		/* don't return filters that are being deleted */
1923 		if (!refcount_inc_not_zero(&f->refcnt))
1924 			continue;
1925 		if (arg->fn(tp, f, arg) < 0) {
1926 			__fl_put(f);
1927 			arg->stop = 1;
1928 			break;
1929 		}
1930 		__fl_put(f);
1931 		arg->count++;
1932 	}
1933 	arg->cookie = id;
1934 }
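/* The walk is resumable: arg->cookie carries the next IDR id to visit,
 * so a dump interrupted by a full skb continues where it left off
 * instead of restarting from the first filter.
 */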
1935 
1936 static struct cls_fl_filter *
1937 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1938 {
1939 	struct cls_fl_head *head = fl_head_dereference(tp);
1940 
1941 	spin_lock(&tp->lock);
1942 	if (list_empty(&head->hw_filters)) {
1943 		spin_unlock(&tp->lock);
1944 		return NULL;
1945 	}
1946 
1947 	if (!f)
1948 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1949 			       hw_list);
1950 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1951 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1952 			spin_unlock(&tp->lock);
1953 			return f;
1954 		}
1955 	}
1956 
1957 	spin_unlock(&tp->lock);
1958 	return NULL;
1959 }
1960 
1961 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
1962 			void *cb_priv, struct netlink_ext_ack *extack)
1963 {
1964 	struct tcf_block *block = tp->chain->block;
1965 	struct flow_cls_offload cls_flower = {};
1966 	struct cls_fl_filter *f = NULL;
1967 	int err;
1968 
1969 	/* The hw_filters list can only be changed by the hw offload functions
1970 	 * after obtaining the rtnl lock. Make sure it is not changed while
1971 	 * reoffload is iterating it.
1972 	 */
1973 	ASSERT_RTNL();
1974 
1975 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1976 		cls_flower.rule =
1977 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1978 		if (!cls_flower.rule) {
1979 			__fl_put(f);
1980 			return -ENOMEM;
1981 		}
1982 
1983 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1984 					   extack);
1985 		cls_flower.command = add ?
1986 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
1987 		cls_flower.cookie = (unsigned long)f;
1988 		cls_flower.rule->match.dissector = &f->mask->dissector;
1989 		cls_flower.rule->match.mask = &f->mask->key;
1990 		cls_flower.rule->match.key = &f->mkey;
1991 
1992 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts,
1993 					   true);
1994 		if (err) {
1995 			kfree(cls_flower.rule);
1996 			if (tc_skip_sw(f->flags)) {
1997 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1998 				__fl_put(f);
1999 				return err;
2000 			}
2001 			goto next_flow;
2002 		}
2003 
2004 		cls_flower.classid = f->res.classid;
2005 
2006 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2007 					    TC_SETUP_CLSFLOWER, &cls_flower,
2008 					    cb_priv, &f->flags,
2009 					    &f->in_hw_count);
2010 		tc_cleanup_flow_action(&cls_flower.rule->action);
2011 		kfree(cls_flower.rule);
2012 
2013 		if (err) {
2014 			__fl_put(f);
2015 			return err;
2016 		}
2017 next_flow:
2018 		__fl_put(f);
2019 	}
2020 
2021 	return 0;
2022 }
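/* fl_reoffload() replays every filter present in hardware to a newly
 * bound (add == true) or unbinding (add == false) block callback, so a
 * driver attaching to an existing block receives the complete rule set.
 */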
2023 
2024 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2025 {
2026 	struct flow_cls_offload *cls_flower = type_data;
2027 	struct cls_fl_filter *f =
2028 		(struct cls_fl_filter *) cls_flower->cookie;
2029 	struct cls_fl_head *head = fl_head_dereference(tp);
2030 
2031 	spin_lock(&tp->lock);
2032 	list_add(&f->hw_list, &head->hw_filters);
2033 	spin_unlock(&tp->lock);
2034 }
2035 
2036 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2037 {
2038 	struct flow_cls_offload *cls_flower = type_data;
2039 	struct cls_fl_filter *f =
2040 		(struct cls_fl_filter *) cls_flower->cookie;
2041 
2042 	spin_lock(&tp->lock);
2043 	if (!list_empty(&f->hw_list))
2044 		list_del_init(&f->hw_list);
2045 	spin_unlock(&tp->lock);
2046 }
2047 
2048 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2049 			      struct fl_flow_tmplt *tmplt)
2050 {
2051 	struct flow_cls_offload cls_flower = {};
2052 	struct tcf_block *block = chain->block;
2053 
2054 	cls_flower.rule = flow_rule_alloc(0);
2055 	if (!cls_flower.rule)
2056 		return -ENOMEM;
2057 
2058 	cls_flower.common.chain_index = chain->index;
2059 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2060 	cls_flower.cookie = (unsigned long) tmplt;
2061 	cls_flower.rule->match.dissector = &tmplt->dissector;
2062 	cls_flower.rule->match.mask = &tmplt->mask;
2063 	cls_flower.rule->match.key = &tmplt->dummy_key;
2064 
2065 	/* We don't care if any of the drivers fail to handle this call.
2066 	 * It serves only as a hint for them.
2067 	 */
2068 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2069 	kfree(cls_flower.rule);
2070 
2071 	return 0;
2072 }
2073 
2074 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2075 				struct fl_flow_tmplt *tmplt)
2076 {
2077 	struct flow_cls_offload cls_flower = {};
2078 	struct tcf_block *block = chain->block;
2079 
2080 	cls_flower.common.chain_index = chain->index;
2081 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2082 	cls_flower.cookie = (unsigned long) tmplt;
2083 
2084 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2085 }
2086 
2087 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2088 			     struct nlattr **tca,
2089 			     struct netlink_ext_ack *extack)
2090 {
2091 	struct fl_flow_tmplt *tmplt;
2092 	struct nlattr **tb;
2093 	int err;
2094 
2095 	if (!tca[TCA_OPTIONS])
2096 		return ERR_PTR(-EINVAL);
2097 
2098 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2099 	if (!tb)
2100 		return ERR_PTR(-ENOBUFS);
2101 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2102 					  tca[TCA_OPTIONS], fl_policy, NULL);
2103 	if (err)
2104 		goto errout_tb;
2105 
2106 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2107 	if (!tmplt) {
2108 		err = -ENOMEM;
2109 		goto errout_tb;
2110 	}
2111 	tmplt->chain = chain;
2112 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2113 	if (err)
2114 		goto errout_tmplt;
2115 
2116 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2117 
2118 	err = fl_hw_create_tmplt(chain, tmplt);
2119 	if (err)
2120 		goto errout_tmplt;
2121 
2122 	kfree(tb);
2123 	return tmplt;
2124 
2125 errout_tmplt:
2126 	kfree(tmplt);
2127 errout_tb:
2128 	kfree(tb);
2129 	return ERR_PTR(err);
2130 }
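/* A template constrains which masks later filters on the chain may use
 * (see the fl_mask_fits_tmplt() check in fl_set_parms()) and gives
 * drivers an early sizing hint. Illustrative iproute2 usage (device and
 * match are examples only):
 *
 *	tc chain add dev eth0 ingress protocol ip flower dst_ip 0.0.0.0/32
 *
 * after which only filters whose mask fits the template are accepted on
 * that chain.
 */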
2131 
2132 static void fl_tmplt_destroy(void *tmplt_priv)
2133 {
2134 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2135 
2136 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2137 	kfree(tmplt);
2138 }
2139 
2140 static int fl_dump_key_val(struct sk_buff *skb,
2141 			   void *val, int val_type,
2142 			   void *mask, int mask_type, int len)
2143 {
2144 	int err;
2145 
2146 	if (!memchr_inv(mask, 0, len))
2147 		return 0;
2148 	err = nla_put(skb, val_type, len, val);
2149 	if (err)
2150 		return err;
2151 	if (mask_type != TCA_FLOWER_UNSPEC) {
2152 		err = nla_put(skb, mask_type, len, mask);
2153 		if (err)
2154 			return err;
2155 	}
2156 	return 0;
2157 }
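/* Nothing is emitted for a completely unmasked field, and the mask
 * attribute is skipped for exact-match-only fields that have no *_MASK
 * counterpart (mask_type == TCA_FLOWER_UNSPEC). For example, a masked
 * destination MAC is dumped as the attribute pair
 * TCA_FLOWER_KEY_ETH_DST / TCA_FLOWER_KEY_ETH_DST_MASK.
 */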
2158 
2159 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2160 				  struct fl_flow_key *mask)
2161 {
2162 	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
2163 			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
2164 			    sizeof(key->tp_min.dst)) ||
2165 	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
2166 			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
2167 			    sizeof(key->tp_max.dst)) ||
2168 	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
2169 			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
2170 			    sizeof(key->tp_min.src)) ||
2171 	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
2172 			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
2173 			    sizeof(key->tp_max.src)))
2174 		return -1;
2175 
2176 	return 0;
2177 }
2178 
2179 static int fl_dump_key_mpls(struct sk_buff *skb,
2180 			    struct flow_dissector_key_mpls *mpls_key,
2181 			    struct flow_dissector_key_mpls *mpls_mask)
2182 {
2183 	int err;
2184 
2185 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
2186 		return 0;
2187 	if (mpls_mask->mpls_ttl) {
2188 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2189 				 mpls_key->mpls_ttl);
2190 		if (err)
2191 			return err;
2192 	}
2193 	if (mpls_mask->mpls_tc) {
2194 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2195 				 mpls_key->mpls_tc);
2196 		if (err)
2197 			return err;
2198 	}
2199 	if (mpls_mask->mpls_label) {
2200 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2201 				  mpls_key->mpls_label);
2202 		if (err)
2203 			return err;
2204 	}
2205 	if (mpls_mask->mpls_bos) {
2206 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2207 				 mpls_key->mpls_bos);
2208 		if (err)
2209 			return err;
2210 	}
2211 	return 0;
2212 }
2213 
2214 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2215 			  struct flow_dissector_key_ip *key,
2216 			  struct flow_dissector_key_ip *mask)
2217 {
2218 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2219 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2220 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2221 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2222 
2223 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2224 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2225 		return -1;
2226 
2227 	return 0;
2228 }
2229 
2230 static int fl_dump_key_vlan(struct sk_buff *skb,
2231 			    int vlan_id_key, int vlan_prio_key,
2232 			    struct flow_dissector_key_vlan *vlan_key,
2233 			    struct flow_dissector_key_vlan *vlan_mask)
2234 {
2235 	int err;
2236 
2237 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2238 		return 0;
2239 	if (vlan_mask->vlan_id) {
2240 		err = nla_put_u16(skb, vlan_id_key,
2241 				  vlan_key->vlan_id);
2242 		if (err)
2243 			return err;
2244 	}
2245 	if (vlan_mask->vlan_priority) {
2246 		err = nla_put_u8(skb, vlan_prio_key,
2247 				 vlan_key->vlan_priority);
2248 		if (err)
2249 			return err;
2250 	}
2251 	return 0;
2252 }
2253 
2254 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2255 			    u32 *flower_key, u32 *flower_mask,
2256 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2257 {
2258 	if (dissector_mask & dissector_flag_bit) {
2259 		*flower_mask |= flower_flag_bit;
2260 		if (dissector_key & dissector_flag_bit)
2261 			*flower_key |= flower_flag_bit;
2262 	}
2263 }
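/* Translates a single flag bit from the flow dissector encoding to the
 * TCA_FLOWER_KEY_FLAGS UAPI encoding; e.g. FLOW_DIS_IS_FRAGMENT maps to
 * TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT in fl_dump_key_flags() below.
 */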
2264 
2265 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2266 {
2267 	u32 key, mask;
2268 	__be32 _key, _mask;
2269 	int err;
2270 
2271 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2272 		return 0;
2273 
2274 	key = 0;
2275 	mask = 0;
2276 
2277 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2278 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2279 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2280 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2281 			FLOW_DIS_FIRST_FRAG);
2282 
2283 	_key = cpu_to_be32(key);
2284 	_mask = cpu_to_be32(mask);
2285 
2286 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2287 	if (err)
2288 		return err;
2289 
2290 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2291 }
2292 
2293 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2294 				  struct flow_dissector_key_enc_opts *enc_opts)
2295 {
2296 	struct geneve_opt *opt;
2297 	struct nlattr *nest;
2298 	int opt_off = 0;
2299 
2300 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2301 	if (!nest)
2302 		goto nla_put_failure;
2303 
2304 	while (enc_opts->len > opt_off) {
2305 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2306 
2307 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2308 				 opt->opt_class))
2309 			goto nla_put_failure;
2310 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2311 			       opt->type))
2312 			goto nla_put_failure;
2313 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2314 			    opt->length * 4, opt->opt_data))
2315 			goto nla_put_failure;
2316 
2317 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2318 	}
2319 	nla_nest_end(skb, nest);
2320 	return 0;
2321 
2322 nla_put_failure:
2323 	nla_nest_cancel(skb, nest);
2324 	return -EMSGSIZE;
2325 }
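/* enc_opts->data holds a packed sequence of struct geneve_opt TLVs.
 * opt->length counts 4-byte words of option data, hence the
 * "opt->length * 4" above both when sizing the data attribute and when
 * advancing opt_off to the next TLV.
 */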
2326 
2327 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2328 				 struct flow_dissector_key_enc_opts *enc_opts)
2329 {
2330 	struct vxlan_metadata *md;
2331 	struct nlattr *nest;
2332 
2333 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2334 	if (!nest)
2335 		goto nla_put_failure;
2336 
2337 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2338 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2339 		goto nla_put_failure;
2340 
2341 	nla_nest_end(skb, nest);
2342 	return 0;
2343 
2344 nla_put_failure:
2345 	nla_nest_cancel(skb, nest);
2346 	return -EMSGSIZE;
2347 }
2348 
2349 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2350 				  struct flow_dissector_key_enc_opts *enc_opts)
2351 {
2352 	struct erspan_metadata *md;
2353 	struct nlattr *nest;
2354 
2355 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2356 	if (!nest)
2357 		goto nla_put_failure;
2358 
2359 	md = (struct erspan_metadata *)&enc_opts->data[0];
2360 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2361 		goto nla_put_failure;
2362 
2363 	if (md->version == 1 &&
2364 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2365 		goto nla_put_failure;
2366 
2367 	if (md->version == 2 &&
2368 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2369 			md->u.md2.dir) ||
2370 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2371 			get_hwid(&md->u.md2))))
2372 		goto nla_put_failure;
2373 
2374 	nla_nest_end(skb, nest);
2375 	return 0;
2376 
2377 nla_put_failure:
2378 	nla_nest_cancel(skb, nest);
2379 	return -EMSGSIZE;
2380 }
2381 
2382 static int fl_dump_key_ct(struct sk_buff *skb,
2383 			  struct flow_dissector_key_ct *key,
2384 			  struct flow_dissector_key_ct *mask)
2385 {
2386 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2387 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2388 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2389 			    sizeof(key->ct_state)))
2390 		goto nla_put_failure;
2391 
2392 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2393 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2394 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2395 			    sizeof(key->ct_zone)))
2396 		goto nla_put_failure;
2397 
2398 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2399 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2400 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2401 			    sizeof(key->ct_mark)))
2402 		goto nla_put_failure;
2403 
2404 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2405 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2406 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2407 			    sizeof(key->ct_labels)))
2408 		goto nla_put_failure;
2409 
2410 	return 0;
2411 
2412 nla_put_failure:
2413 	return -EMSGSIZE;
2414 }
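/* Each conntrack sub-field is dumped only when the corresponding kernel
 * feature is compiled in; with CONFIG_NF_CONNTRACK disabled this
 * function degenerates to a no-op that reports success.
 */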
2415 
2416 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2417 			       struct flow_dissector_key_enc_opts *enc_opts)
2418 {
2419 	struct nlattr *nest;
2420 	int err;
2421 
2422 	if (!enc_opts->len)
2423 		return 0;
2424 
2425 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2426 	if (!nest)
2427 		goto nla_put_failure;
2428 
2429 	switch (enc_opts->dst_opt_type) {
2430 	case TUNNEL_GENEVE_OPT:
2431 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2432 		if (err)
2433 			goto nla_put_failure;
2434 		break;
2435 	case TUNNEL_VXLAN_OPT:
2436 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2437 		if (err)
2438 			goto nla_put_failure;
2439 		break;
2440 	case TUNNEL_ERSPAN_OPT:
2441 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2442 		if (err)
2443 			goto nla_put_failure;
2444 		break;
2445 	default:
2446 		goto nla_put_failure;
2447 	}
2448 	nla_nest_end(skb, nest);
2449 	return 0;
2450 
2451 nla_put_failure:
2452 	nla_nest_cancel(skb, nest);
2453 	return -EMSGSIZE;
2454 }
2455 
2456 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2457 			       struct flow_dissector_key_enc_opts *key_opts,
2458 			       struct flow_dissector_key_enc_opts *msk_opts)
2459 {
2460 	int err;
2461 
2462 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2463 	if (err)
2464 		return err;
2465 
2466 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2467 }
2468 
2469 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2470 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2471 {
2472 	if (mask->meta.ingress_ifindex) {
2473 		struct net_device *dev;
2474 
2475 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2476 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2477 			goto nla_put_failure;
2478 	}
2479 
2480 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2481 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2482 			    sizeof(key->eth.dst)) ||
2483 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2484 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2485 			    sizeof(key->eth.src)) ||
2486 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2487 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2488 			    sizeof(key->basic.n_proto)))
2489 		goto nla_put_failure;
2490 
2491 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2492 		goto nla_put_failure;
2493 
2494 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2495 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2496 		goto nla_put_failure;
2497 
2498 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2499 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2500 			     &key->cvlan, &mask->cvlan) ||
2501 	    (mask->cvlan.vlan_tpid &&
2502 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2503 			  key->cvlan.vlan_tpid)))
2504 		goto nla_put_failure;
2505 
2506 	if (mask->basic.n_proto) {
2507 		if (mask->cvlan.vlan_tpid) {
2508 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2509 					 key->basic.n_proto))
2510 				goto nla_put_failure;
2511 		} else if (mask->vlan.vlan_tpid) {
2512 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2513 					 key->basic.n_proto))
2514 				goto nla_put_failure;
2515 		}
2516 	}
2517 
2518 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2519 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2520 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2521 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2522 			    sizeof(key->basic.ip_proto)) ||
2523 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2524 		goto nla_put_failure;
2525 
2526 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2527 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2528 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2529 			     sizeof(key->ipv4.src)) ||
2530 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2531 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2532 			     sizeof(key->ipv4.dst))))
2533 		goto nla_put_failure;
2534 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2535 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2536 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2537 				  sizeof(key->ipv6.src)) ||
2538 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2539 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2540 				  sizeof(key->ipv6.dst))))
2541 		goto nla_put_failure;
2542 
2543 	if (key->basic.ip_proto == IPPROTO_TCP &&
2544 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2545 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2546 			     sizeof(key->tp.src)) ||
2547 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2548 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2549 			     sizeof(key->tp.dst)) ||
2550 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2551 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2552 			     sizeof(key->tcp.flags))))
2553 		goto nla_put_failure;
2554 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2555 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2556 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2557 				  sizeof(key->tp.src)) ||
2558 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2559 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2560 				  sizeof(key->tp.dst))))
2561 		goto nla_put_failure;
2562 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2563 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2564 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2565 				  sizeof(key->tp.src)) ||
2566 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2567 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2568 				  sizeof(key->tp.dst))))
2569 		goto nla_put_failure;
2570 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2571 		 key->basic.ip_proto == IPPROTO_ICMP &&
2572 		 (fl_dump_key_val(skb, &key->icmp.type,
2573 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2574 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2575 				  sizeof(key->icmp.type)) ||
2576 		  fl_dump_key_val(skb, &key->icmp.code,
2577 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2578 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2579 				  sizeof(key->icmp.code))))
2580 		goto nla_put_failure;
2581 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2582 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2583 		 (fl_dump_key_val(skb, &key->icmp.type,
2584 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2585 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2586 				  sizeof(key->icmp.type)) ||
2587 		  fl_dump_key_val(skb, &key->icmp.code,
2588 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2589 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2590 				  sizeof(key->icmp.code))))
2591 		goto nla_put_failure;
2592 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2593 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2594 		 (fl_dump_key_val(skb, &key->arp.sip,
2595 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2596 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2597 				  sizeof(key->arp.sip)) ||
2598 		  fl_dump_key_val(skb, &key->arp.tip,
2599 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2600 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2601 				  sizeof(key->arp.tip)) ||
2602 		  fl_dump_key_val(skb, &key->arp.op,
2603 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2604 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2605 				  sizeof(key->arp.op)) ||
2606 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2607 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2608 				  sizeof(key->arp.sha)) ||
2609 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2610 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2611 				  sizeof(key->arp.tha))))
2612 		goto nla_put_failure;
2613 
2614 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2615 	     key->basic.ip_proto == IPPROTO_UDP ||
2616 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2617 	     fl_dump_key_port_range(skb, key, mask))
2618 		goto nla_put_failure;
2619 
2620 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2621 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2622 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2623 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2624 			    sizeof(key->enc_ipv4.src)) ||
2625 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2626 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2627 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2628 			     sizeof(key->enc_ipv4.dst))))
2629 		goto nla_put_failure;
2630 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2631 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2632 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2633 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2634 			    sizeof(key->enc_ipv6.src)) ||
2635 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2636 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2637 				 &mask->enc_ipv6.dst,
2638 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2639 			    sizeof(key->enc_ipv6.dst))))
2640 		goto nla_put_failure;
2641 
2642 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2643 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2644 			    sizeof(key->enc_key_id)) ||
2645 	    fl_dump_key_val(skb, &key->enc_tp.src,
2646 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2647 			    &mask->enc_tp.src,
2648 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2649 			    sizeof(key->enc_tp.src)) ||
2650 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2651 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2652 			    &mask->enc_tp.dst,
2653 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2654 			    sizeof(key->enc_tp.dst)) ||
2655 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2656 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2657 		goto nla_put_failure;
2658 
2659 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
2660 		goto nla_put_failure;
2661 
2662 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2663 		goto nla_put_failure;
2664 
2665 	return 0;
2666 
2667 nla_put_failure:
2668 	return -EMSGSIZE;
2669 }
2670 
2671 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2672 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2673 {
2674 	struct cls_fl_filter *f = fh;
2675 	struct nlattr *nest;
2676 	struct fl_flow_key *key, *mask;
2677 	bool skip_hw;
2678 
2679 	if (!f)
2680 		return skb->len;
2681 
2682 	t->tcm_handle = f->handle;
2683 
2684 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2685 	if (!nest)
2686 		goto nla_put_failure;
2687 
2688 	spin_lock(&tp->lock);
2689 
2690 	if (f->res.classid &&
2691 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2692 		goto nla_put_failure_locked;
2693 
2694 	key = &f->key;
2695 	mask = &f->mask->key;
2696 	skip_hw = tc_skip_hw(f->flags);
2697 
2698 	if (fl_dump_key(skb, net, key, mask))
2699 		goto nla_put_failure_locked;
2700 
2701 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2702 		goto nla_put_failure_locked;
2703 
2704 	spin_unlock(&tp->lock);
2705 
2706 	if (!skip_hw)
2707 		fl_hw_update_stats(tp, f, rtnl_held);
2708 
2709 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2710 		goto nla_put_failure;
2711 
2712 	if (tcf_exts_dump(skb, &f->exts))
2713 		goto nla_put_failure;
2714 
2715 	nla_nest_end(skb, nest);
2716 
2717 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2718 		goto nla_put_failure;
2719 
2720 	return skb->len;
2721 
2722 nla_put_failure_locked:
2723 	spin_unlock(&tp->lock);
2724 nla_put_failure:
2725 	nla_nest_cancel(skb, nest);
2726 	return -1;
2727 }
2728 
2729 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2730 {
2731 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2732 	struct fl_flow_key *key, *mask;
2733 	struct nlattr *nest;
2734 
2735 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2736 	if (!nest)
2737 		goto nla_put_failure;
2738 
2739 	key = &tmplt->dummy_key;
2740 	mask = &tmplt->mask;
2741 
2742 	if (fl_dump_key(skb, net, key, mask))
2743 		goto nla_put_failure;
2744 
2745 	nla_nest_end(skb, nest);
2746 
2747 	return skb->len;
2748 
2749 nla_put_failure:
2750 	nla_nest_cancel(skb, nest);
2751 	return -EMSGSIZE;
2752 }
2753 
2754 static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2755 {
2756 	struct cls_fl_filter *f = fh;
2757 
2758 	if (f && f->res.classid == classid)
2759 		f->res.class = cl;
2760 }
2761 
2762 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2763 	.kind		= "flower",
2764 	.classify	= fl_classify,
2765 	.init		= fl_init,
2766 	.destroy	= fl_destroy,
2767 	.get		= fl_get,
2768 	.put		= fl_put,
2769 	.change		= fl_change,
2770 	.delete		= fl_delete,
2771 	.walk		= fl_walk,
2772 	.reoffload	= fl_reoffload,
2773 	.hw_add		= fl_hw_add,
2774 	.hw_del		= fl_hw_del,
2775 	.dump		= fl_dump,
2776 	.bind_class	= fl_bind_class,
2777 	.tmplt_create	= fl_tmplt_create,
2778 	.tmplt_destroy	= fl_tmplt_destroy,
2779 	.tmplt_dump	= fl_tmplt_dump,
2780 	.owner		= THIS_MODULE,
2781 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2782 };
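/* TCF_PROTO_OPS_DOIT_UNLOCKED lets cls_api invoke these callbacks
 * without holding the rtnl lock, which is why the code above relies on
 * tp->lock, RCU and reference counting instead of rtnl serialization,
 * taking rtnl explicitly only where required (e.g. tcf_bind_filter()).
 */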
2783 
2784 static int __init cls_fl_init(void)
2785 {
2786 	return register_tcf_proto_ops(&cls_fl_ops);
2787 }
2788 
2789 static void __exit cls_fl_exit(void)
2790 {
2791 	unregister_tcf_proto_ops(&cls_fl_ops);
2792 }
2793 
2794 module_init(cls_fl_init);
2795 module_exit(cls_fl_exit);
2796 
2797 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2798 MODULE_DESCRIPTION("Flower classifier");
2799 MODULE_LICENSE("GPL v2");
2800