1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_flower.c		Flower classifier
4  *
5  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/rhashtable.h>
12 #include <linux/workqueue.h>
13 #include <linux/refcount.h>
14 
15 #include <linux/if_ether.h>
16 #include <linux/in6.h>
17 #include <linux/ip.h>
18 #include <linux/mpls.h>
19 
20 #include <net/sch_generic.h>
21 #include <net/pkt_cls.h>
22 #include <net/ip.h>
23 #include <net/flow_dissector.h>
24 #include <net/geneve.h>
25 #include <net/vxlan.h>
26 #include <net/erspan.h>
27 
28 #include <net/dst.h>
29 #include <net/dst_metadata.h>
30 
31 #include <uapi/linux/netfilter/nf_conntrack_common.h>
32 
33 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \
34 		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
35 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \
36 		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
37 
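/* Aggregate of all flow dissector keys that flower can match on.  Both a
 * filter's key and its mask are stored in this layout; the alignment below
 * lets the mask-related helpers walk the structure one long at a time.
 */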
38 struct fl_flow_key {
39 	struct flow_dissector_key_meta meta;
40 	struct flow_dissector_key_control control;
41 	struct flow_dissector_key_control enc_control;
42 	struct flow_dissector_key_basic basic;
43 	struct flow_dissector_key_eth_addrs eth;
44 	struct flow_dissector_key_vlan vlan;
45 	struct flow_dissector_key_vlan cvlan;
46 	union {
47 		struct flow_dissector_key_ipv4_addrs ipv4;
48 		struct flow_dissector_key_ipv6_addrs ipv6;
49 	};
50 	struct flow_dissector_key_ports tp;
51 	struct flow_dissector_key_icmp icmp;
52 	struct flow_dissector_key_arp arp;
53 	struct flow_dissector_key_keyid enc_key_id;
54 	union {
55 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
56 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
57 	};
58 	struct flow_dissector_key_ports enc_tp;
59 	struct flow_dissector_key_mpls mpls;
60 	struct flow_dissector_key_tcp tcp;
61 	struct flow_dissector_key_ip ip;
62 	struct flow_dissector_key_ip enc_ip;
63 	struct flow_dissector_key_enc_opts enc_opts;
64 	union {
65 		struct flow_dissector_key_ports tp;
66 		struct {
67 			struct flow_dissector_key_ports tp_min;
68 			struct flow_dissector_key_ports tp_max;
69 		};
70 	} tp_range;
71 	struct flow_dissector_key_ct ct;
72 	struct flow_dissector_key_hash hash;
73 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
74 
75 struct fl_flow_mask_range {
76 	unsigned short int start;
77 	unsigned short int end;
78 };
79 
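/* Per-mask state.  All filters that share the same mask hang off one
 * fl_flow_mask and are hashed by their masked key in the embedded
 * rhashtable.
 */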
80 struct fl_flow_mask {
81 	struct fl_flow_key key;
82 	struct fl_flow_mask_range range;
83 	u32 flags;
84 	struct rhash_head ht_node;
85 	struct rhashtable ht;
86 	struct rhashtable_params filter_ht_params;
87 	struct flow_dissector dissector;
88 	struct list_head filters;
89 	struct rcu_work rwork;
90 	struct list_head list;
91 	refcount_t refcnt;
92 };
93 
94 struct fl_flow_tmplt {
95 	struct fl_flow_key dummy_key;
96 	struct fl_flow_key mask;
97 	struct flow_dissector dissector;
98 	struct tcf_chain *chain;
99 };
100 
101 struct cls_fl_head {
102 	struct rhashtable ht;
103 	spinlock_t masks_lock; /* Protect masks list */
104 	struct list_head masks;
105 	struct list_head hw_filters;
106 	struct rcu_work rwork;
107 	struct idr handle_idr;
108 };
109 
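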
110 struct cls_fl_filter {
111 	struct fl_flow_mask *mask;
112 	struct rhash_head ht_node;
113 	struct fl_flow_key mkey;
114 	struct tcf_exts exts;
115 	struct tcf_result res;
116 	struct fl_flow_key key;
117 	struct list_head list;
118 	struct list_head hw_list;
119 	u32 handle;
120 	u32 flags;
121 	u32 in_hw_count;
122 	struct rcu_work rwork;
123 	struct net_device *hw_dev;
124 	/* The flower classifier is unlocked, which means that its reference
125 	 * counter can be changed concurrently without any external
126 	 * synchronization. Use an atomic reference counter to be concurrency-safe.
127 	 */
128 	refcount_t refcnt;
129 	bool deleted;
130 };
131 
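/* The per-classifier mask hashtable deduplicates masks: a mask is keyed by
 * the full contents of its fl_flow_key.
 */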
132 static const struct rhashtable_params mask_ht_params = {
133 	.key_offset = offsetof(struct fl_flow_mask, key),
134 	.key_len = sizeof(struct fl_flow_key),
135 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
136 	.automatic_shrinking = true,
137 };
138 
139 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
140 {
141 	return mask->range.end - mask->range.start;
142 }
143 
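/* Compute the byte range [start, end) covering every non-zero byte of the
 * mask, rounded out to long boundaries so that the per-long helpers below
 * stay inside it.  For example, on a 64-bit host a mask whose first
 * non-zero byte is at offset 13 and last at offset 22 gets
 * start = 8, end = 24.
 */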
144 static void fl_mask_update_range(struct fl_flow_mask *mask)
145 {
146 	const u8 *bytes = (const u8 *) &mask->key;
147 	size_t size = sizeof(mask->key);
148 	size_t i, first = 0, last;
149 
150 	for (i = 0; i < size; i++) {
151 		if (bytes[i]) {
152 			first = i;
153 			break;
154 		}
155 	}
156 	last = first;
157 	for (i = size - 1; i != first; i--) {
158 		if (bytes[i]) {
159 			last = i;
160 			break;
161 		}
162 	}
163 	mask->range.start = rounddown(first, sizeof(long));
164 	mask->range.end = roundup(last + 1, sizeof(long));
165 }
166 
167 static void *fl_key_get_start(struct fl_flow_key *key,
168 			      const struct fl_flow_mask *mask)
169 {
170 	return (u8 *) key + mask->range.start;
171 }
172 
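/* mkey = key & mask, computed one long at a time and only over the mask's
 * relevant byte range.
 */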
173 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
174 			      struct fl_flow_mask *mask)
175 {
176 	const long *lkey = fl_key_get_start(key, mask);
177 	const long *lmask = fl_key_get_start(&mask->key, mask);
178 	long *lmkey = fl_key_get_start(mkey, mask);
179 	int i;
180 
181 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
182 		*lmkey++ = *lkey++ & *lmask++;
183 }
184 
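/* A mask fits a template if it does not enable any bit that the template's
 * mask leaves clear, i.e. (~template & mask) must be zero for every long in
 * the range.
 */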
185 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
186 			       struct fl_flow_mask *mask)
187 {
188 	const long *lmask = fl_key_get_start(&mask->key, mask);
189 	const long *ltmplt;
190 	int i;
191 
192 	if (!tmplt)
193 		return true;
194 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
195 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
196 		if (~*ltmplt++ & *lmask++)
197 			return false;
198 	}
199 	return true;
200 }
201 
202 static void fl_clear_masked_range(struct fl_flow_key *key,
203 				  struct fl_flow_mask *mask)
204 {
205 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
206 }
207 
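/* Port-range filters cannot be found by a plain hash lookup.  The two
 * helpers below check the packet's port against the filter's [min, max]
 * range and, on a hit, copy the filter's masked min/max values into the
 * lookup key so that the following hash lookup can still match.
 */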
208 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
209 				  struct fl_flow_key *key,
210 				  struct fl_flow_key *mkey)
211 {
212 	u16 min_mask, max_mask, min_val, max_val;
213 
214 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
215 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
216 	min_val = ntohs(filter->key.tp_range.tp_min.dst);
217 	max_val = ntohs(filter->key.tp_range.tp_max.dst);
218 
219 	if (min_mask && max_mask) {
220 		if (ntohs(key->tp_range.tp.dst) < min_val ||
221 		    ntohs(key->tp_range.tp.dst) > max_val)
222 			return false;
223 
224 		/* skb does not have min and max values */
225 		mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
226 		mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
227 	}
228 	return true;
229 }
230 
231 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
232 				  struct fl_flow_key *key,
233 				  struct fl_flow_key *mkey)
234 {
235 	u16 min_mask, max_mask, min_val, max_val;
236 
237 	min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
238 	max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
239 	min_val = ntohs(filter->key.tp_range.tp_min.src);
240 	max_val = ntohs(filter->key.tp_range.tp_max.src);
241 
242 	if (min_mask && max_mask) {
243 		if (ntohs(key->tp_range.tp.src) < min_val ||
244 		    ntohs(key->tp_range.tp.src) > max_val)
245 			return false;
246 
247 		/* skb does not have min and max values */
248 		mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
249 		mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
250 	}
251 	return true;
252 }
253 
254 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
255 					 struct fl_flow_key *mkey)
256 {
257 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
258 				      mask->filter_ht_params);
259 }
260 
261 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
262 					     struct fl_flow_key *mkey,
263 					     struct fl_flow_key *key)
264 {
265 	struct cls_fl_filter *filter, *f;
266 
267 	list_for_each_entry_rcu(filter, &mask->filters, list) {
268 		if (!fl_range_port_dst_cmp(filter, key, mkey))
269 			continue;
270 
271 		if (!fl_range_port_src_cmp(filter, key, mkey))
272 			continue;
273 
274 		f = __fl_lookup(mask, mkey);
275 		if (f)
276 			return f;
277 	}
278 	return NULL;
279 }
280 
281 static noinline_for_stack
282 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
283 {
284 	struct fl_flow_key mkey;
285 
286 	fl_set_masked_key(&mkey, key, mask);
287 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
288 		return fl_lookup_range(mask, &mkey, key);
289 
290 	return __fl_lookup(mask, &mkey);
291 }
292 
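/* Map conntrack info states to the TCA_FLOWER_KEY_CT_FLAGS_* bits that are
 * exposed to user space.
 */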
293 static u16 fl_ct_info_to_flower_map[] = {
294 	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
295 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
296 	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
297 					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
298 	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
299 					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
300 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
301 	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
302 					TCA_FLOWER_KEY_CT_FLAGS_RELATED |
303 					TCA_FLOWER_KEY_CT_FLAGS_REPLY,
304 	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
305 					TCA_FLOWER_KEY_CT_FLAGS_NEW,
306 };
307 
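/* Fast path.  For each mask, dissect only the fields that the mask cares
 * about, apply the mask to the dissected key and look the result up in the
 * mask's filter hashtable.  The first match that is not skip_sw wins.
 */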
308 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
309 		       struct tcf_result *res)
310 {
311 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
312 	bool post_ct = qdisc_skb_cb(skb)->post_ct;
313 	struct fl_flow_key skb_key;
314 	struct fl_flow_mask *mask;
315 	struct cls_fl_filter *f;
316 
317 	list_for_each_entry_rcu(mask, &head->masks, list) {
318 		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
319 		fl_clear_masked_range(&skb_key, mask);
320 
321 		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
322 		/* skb_flow_dissect() does not set n_proto in case of an unknown
323 		 * protocol, so set it here instead.
324 		 */
325 		skb_key.basic.n_proto = skb_protocol(skb, false);
326 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
327 		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
328 				    fl_ct_info_to_flower_map,
329 				    ARRAY_SIZE(fl_ct_info_to_flower_map),
330 				    post_ct);
331 		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
332 		skb_flow_dissect(skb, &mask->dissector, &skb_key,
333 				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
334 
335 		f = fl_mask_lookup(mask, &skb_key);
336 		if (f && !tc_skip_sw(f->flags)) {
337 			*res = f->res;
338 			return tcf_exts_exec(skb, &f->exts, res);
339 		}
340 	}
341 	return -1;
342 }
343 
344 static int fl_init(struct tcf_proto *tp)
345 {
346 	struct cls_fl_head *head;
347 
348 	head = kzalloc(sizeof(*head), GFP_KERNEL);
349 	if (!head)
350 		return -ENOBUFS;
351 
352 	spin_lock_init(&head->masks_lock);
353 	INIT_LIST_HEAD_RCU(&head->masks);
354 	INIT_LIST_HEAD(&head->hw_filters);
355 	rcu_assign_pointer(tp->root, head);
356 	idr_init(&head->handle_idr);
357 
358 	return rhashtable_init(&head->ht, &mask_ht_params);
359 }
360 
361 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
362 {
363 	/* temporary masks don't have their filters list and ht initialized */
364 	if (mask_init_done) {
365 		WARN_ON(!list_empty(&mask->filters));
366 		rhashtable_destroy(&mask->ht);
367 	}
368 	kfree(mask);
369 }
370 
371 static void fl_mask_free_work(struct work_struct *work)
372 {
373 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
374 						 struct fl_flow_mask, rwork);
375 
376 	fl_mask_free(mask, true);
377 }
378 
379 static void fl_uninit_mask_free_work(struct work_struct *work)
380 {
381 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
382 						 struct fl_flow_mask, rwork);
383 
384 	fl_mask_free(mask, false);
385 }
386 
387 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
388 {
389 	if (!refcount_dec_and_test(&mask->refcnt))
390 		return false;
391 
392 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
393 
394 	spin_lock(&head->masks_lock);
395 	list_del_rcu(&mask->list);
396 	spin_unlock(&head->masks_lock);
397 
398 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
399 
400 	return true;
401 }
402 
403 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
404 {
405 	/* The flower classifier only changes the root pointer during init and
406 	 * destroy. Users must obtain a reference to the tcf_proto instance
407 	 * before calling its API, so the tp->root pointer is protected from a
408 	 * concurrent call to fl_destroy() by reference counting.
409 	 */
410 	return rcu_dereference_raw(tp->root);
411 }
412 
413 static void __fl_destroy_filter(struct cls_fl_filter *f)
414 {
415 	tcf_exts_destroy(&f->exts);
416 	tcf_exts_put_net(&f->exts);
417 	kfree(f);
418 }
419 
420 static void fl_destroy_filter_work(struct work_struct *work)
421 {
422 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
423 					struct cls_fl_filter, rwork);
424 
425 	__fl_destroy_filter(f);
426 }
427 
428 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
429 				 bool rtnl_held, struct netlink_ext_ack *extack)
430 {
431 	struct tcf_block *block = tp->chain->block;
432 	struct flow_cls_offload cls_flower = {};
433 
434 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
435 	cls_flower.command = FLOW_CLS_DESTROY;
436 	cls_flower.cookie = (unsigned long) f;
437 
438 	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
439 			    &f->flags, &f->in_hw_count, rtnl_held);
440 
441 }
442 
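/* Offer the filter to hardware through the block's offload callbacks.  For
 * filters marked skip_sw a failure to install the rule in hardware makes
 * the whole replace fail; otherwise the filter can still run in software.
 */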
443 static int fl_hw_replace_filter(struct tcf_proto *tp,
444 				struct cls_fl_filter *f, bool rtnl_held,
445 				struct netlink_ext_ack *extack)
446 {
447 	struct tcf_block *block = tp->chain->block;
448 	struct flow_cls_offload cls_flower = {};
449 	bool skip_sw = tc_skip_sw(f->flags);
450 	int err = 0;
451 
452 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
453 	if (!cls_flower.rule)
454 		return -ENOMEM;
455 
456 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
457 	cls_flower.command = FLOW_CLS_REPLACE;
458 	cls_flower.cookie = (unsigned long) f;
459 	cls_flower.rule->match.dissector = &f->mask->dissector;
460 	cls_flower.rule->match.mask = &f->mask->key;
461 	cls_flower.rule->match.key = &f->mkey;
462 	cls_flower.classid = f->res.classid;
463 
464 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
465 	if (err) {
466 		kfree(cls_flower.rule);
467 		if (skip_sw) {
468 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
469 			return err;
470 		}
471 		return 0;
472 	}
473 
474 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
475 			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
476 	tc_cleanup_flow_action(&cls_flower.rule->action);
477 	kfree(cls_flower.rule);
478 
479 	if (err) {
480 		fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
481 		return err;
482 	}
483 
484 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
485 		return -EINVAL;
486 
487 	return 0;
488 }
489 
490 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
491 			       bool rtnl_held)
492 {
493 	struct tcf_block *block = tp->chain->block;
494 	struct flow_cls_offload cls_flower = {};
495 
496 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
497 	cls_flower.command = FLOW_CLS_STATS;
498 	cls_flower.cookie = (unsigned long) f;
499 	cls_flower.classid = f->res.classid;
500 
501 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
502 			 rtnl_held);
503 
504 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
505 			      cls_flower.stats.pkts,
506 			      cls_flower.stats.drops,
507 			      cls_flower.stats.lastused,
508 			      cls_flower.stats.used_hw_stats,
509 			      cls_flower.stats.used_hw_stats_valid);
510 }
511 
512 static void __fl_put(struct cls_fl_filter *f)
513 {
514 	if (!refcount_dec_and_test(&f->refcnt))
515 		return;
516 
517 	if (tcf_exts_get_net(&f->exts))
518 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
519 	else
520 		__fl_destroy_filter(f);
521 }
522 
523 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
524 {
525 	struct cls_fl_filter *f;
526 
527 	rcu_read_lock();
528 	f = idr_find(&head->handle_idr, handle);
529 	if (f && !refcount_inc_not_zero(&f->refcnt))
530 		f = NULL;
531 	rcu_read_unlock();
532 
533 	return f;
534 }
535 
536 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
537 		       bool *last, bool rtnl_held,
538 		       struct netlink_ext_ack *extack)
539 {
540 	struct cls_fl_head *head = fl_head_dereference(tp);
541 
542 	*last = false;
543 
544 	spin_lock(&tp->lock);
545 	if (f->deleted) {
546 		spin_unlock(&tp->lock);
547 		return -ENOENT;
548 	}
549 
550 	f->deleted = true;
551 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
552 			       f->mask->filter_ht_params);
553 	idr_remove(&head->handle_idr, f->handle);
554 	list_del_rcu(&f->list);
555 	spin_unlock(&tp->lock);
556 
557 	*last = fl_mask_put(head, f->mask);
558 	if (!tc_skip_hw(f->flags))
559 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
560 	tcf_unbind_filter(tp, &f->res);
561 	__fl_put(f);
562 
563 	return 0;
564 }
565 
566 static void fl_destroy_sleepable(struct work_struct *work)
567 {
568 	struct cls_fl_head *head = container_of(to_rcu_work(work),
569 						struct cls_fl_head,
570 						rwork);
571 
572 	rhashtable_destroy(&head->ht);
573 	kfree(head);
574 	module_put(THIS_MODULE);
575 }
576 
577 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
578 		       struct netlink_ext_ack *extack)
579 {
580 	struct cls_fl_head *head = fl_head_dereference(tp);
581 	struct fl_flow_mask *mask, *next_mask;
582 	struct cls_fl_filter *f, *next;
583 	bool last;
584 
585 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
586 		list_for_each_entry_safe(f, next, &mask->filters, list) {
587 			__fl_delete(tp, f, &last, rtnl_held, extack);
588 			if (last)
589 				break;
590 		}
591 	}
592 	idr_destroy(&head->handle_idr);
593 
594 	__module_get(THIS_MODULE);
595 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
596 }
597 
598 static void fl_put(struct tcf_proto *tp, void *arg)
599 {
600 	struct cls_fl_filter *f = arg;
601 
602 	__fl_put(f);
603 }
604 
605 static void *fl_get(struct tcf_proto *tp, u32 handle)
606 {
607 	struct cls_fl_head *head = fl_head_dereference(tp);
608 
609 	return __fl_get(head, handle);
610 }
611 
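/* Netlink policy for the TCA_FLOWER_* attributes.  As an illustration, a
 * command such as
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * arrives here as TCA_FLOWER_KEY_ETH_TYPE, TCA_FLOWER_KEY_IP_PROTO and
 * TCA_FLOWER_KEY_TCP_DST attributes that are validated against this table.
 */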
612 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
613 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
614 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
615 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
616 					    .len = IFNAMSIZ },
617 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
618 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
619 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
620 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
621 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
622 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
623 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
624 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
625 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
628 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
629 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
632 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
633 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
637 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
639 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
640 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
641 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
642 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
643 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
644 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
645 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
646 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
647 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
648 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
660 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
661 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
662 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
670 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
671 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
673 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
674 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
675 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
676 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
677 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
678 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
679 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
680 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
681 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
683 	[TCA_FLOWER_KEY_MPLS_OPTS]	= { .type = NLA_NESTED },
684 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
685 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
687 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
688 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
690 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
691 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
692 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
693 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
694 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
696 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
697 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
698 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
699 	[TCA_FLOWER_KEY_CT_STATE]	=
700 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
701 	[TCA_FLOWER_KEY_CT_STATE_MASK]	=
702 		NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
703 	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
704 	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
705 	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
706 	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
707 	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
708 					    .len = 128 / BITS_PER_BYTE },
709 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
710 					    .len = 128 / BITS_PER_BYTE },
711 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
712 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
713 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
714 
715 };
716 
717 static const struct nla_policy
718 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
719 	[TCA_FLOWER_KEY_ENC_OPTS_UNSPEC]        = {
720 		.strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
721 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
722 	[TCA_FLOWER_KEY_ENC_OPTS_VXLAN]         = { .type = NLA_NESTED },
723 	[TCA_FLOWER_KEY_ENC_OPTS_ERSPAN]        = { .type = NLA_NESTED },
724 };
725 
726 static const struct nla_policy
727 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
728 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
729 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
730 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
731 						       .len = 128 },
732 };
733 
734 static const struct nla_policy
735 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
736 	[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]         = { .type = NLA_U32 },
737 };
738 
739 static const struct nla_policy
740 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
741 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]        = { .type = NLA_U8 },
742 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]      = { .type = NLA_U32 },
743 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]        = { .type = NLA_U8 },
744 	[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]       = { .type = NLA_U8 },
745 };
746 
747 static const struct nla_policy
748 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
749 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]    = { .type = NLA_U8 },
750 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]      = { .type = NLA_U8 },
751 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]      = { .type = NLA_U8 },
752 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]       = { .type = NLA_U8 },
753 	[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]    = { .type = NLA_U32 },
754 };
755 
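/* Copy one key value from its netlink attribute and set up the matching
 * mask: if no mask attribute exists for this key, default to an exact
 * match (all-ones mask).
 */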
756 static void fl_set_key_val(struct nlattr **tb,
757 			   void *val, int val_type,
758 			   void *mask, int mask_type, int len)
759 {
760 	if (!tb[val_type])
761 		return;
762 	nla_memcpy(val, tb[val_type], len);
763 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
764 		memset(mask, 0xff, len);
765 	else
766 		nla_memcpy(mask, tb[mask_type], len);
767 }
768 
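/* Parse the optional source/destination port range attributes and reject
 * ranges where min is not strictly smaller than max.
 */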
769 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
770 				 struct fl_flow_key *mask,
771 				 struct netlink_ext_ack *extack)
772 {
773 	fl_set_key_val(tb, &key->tp_range.tp_min.dst,
774 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
775 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
776 	fl_set_key_val(tb, &key->tp_range.tp_max.dst,
777 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
778 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
779 	fl_set_key_val(tb, &key->tp_range.tp_min.src,
780 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
781 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
782 	fl_set_key_val(tb, &key->tp_range.tp_max.src,
783 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
784 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));
785 
786 	if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
787 	    ntohs(key->tp_range.tp_max.dst) <=
788 	    ntohs(key->tp_range.tp_min.dst)) {
789 		NL_SET_ERR_MSG_ATTR(extack,
790 				    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
791 				    "Invalid destination port range (min must be strictly smaller than max)");
792 		return -EINVAL;
793 	}
794 	if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
795 	    ntohs(key->tp_range.tp_max.src) <=
796 	    ntohs(key->tp_range.tp_min.src)) {
797 		NL_SET_ERR_MSG_ATTR(extack,
798 				    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
799 				    "Invalid source port range (min must be strictly smaller than max)");
800 		return -EINVAL;
801 	}
802 
803 	return 0;
804 }
805 
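/* Parse a single MPLS label stack entry (LSE) given as a nested
 * TCA_FLOWER_KEY_MPLS_OPTS_LSE attribute; depth 1 denotes the outermost
 * LSE.
 */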
806 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
807 			       struct flow_dissector_key_mpls *key_val,
808 			       struct flow_dissector_key_mpls *key_mask,
809 			       struct netlink_ext_ack *extack)
810 {
811 	struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
812 	struct flow_dissector_mpls_lse *lse_mask;
813 	struct flow_dissector_mpls_lse *lse_val;
814 	u8 lse_index;
815 	u8 depth;
816 	int err;
817 
818 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
819 			       mpls_stack_entry_policy, extack);
820 	if (err < 0)
821 		return err;
822 
823 	if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
824 		NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
825 		return -EINVAL;
826 	}
827 
828 	depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);
829 
830 	/* LSE depth starts at 1, for consistency with terminology used by
831 	 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
832 	 */
833 	if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
834 		NL_SET_ERR_MSG_ATTR(extack,
835 				    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
836 				    "Invalid MPLS depth");
837 		return -EINVAL;
838 	}
839 	lse_index = depth - 1;
840 
841 	dissector_set_mpls_lse(key_val, lse_index);
842 	dissector_set_mpls_lse(key_mask, lse_index);
843 
844 	lse_val = &key_val->ls[lse_index];
845 	lse_mask = &key_mask->ls[lse_index];
846 
847 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
848 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
849 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
850 	}
851 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
852 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);
853 
854 		if (bos & ~MPLS_BOS_MASK) {
855 			NL_SET_ERR_MSG_ATTR(extack,
856 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
857 					    "Bottom Of Stack (BOS) must be 0 or 1");
858 			return -EINVAL;
859 		}
860 		lse_val->mpls_bos = bos;
861 		lse_mask->mpls_bos = MPLS_BOS_MASK;
862 	}
863 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
864 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);
865 
866 		if (tc & ~MPLS_TC_MASK) {
867 			NL_SET_ERR_MSG_ATTR(extack,
868 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
869 					    "Traffic Class (TC) must be between 0 and 7");
870 			return -EINVAL;
871 		}
872 		lse_val->mpls_tc = tc;
873 		lse_mask->mpls_tc = MPLS_TC_MASK;
874 	}
875 	if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
876 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);
877 
878 		if (label & ~MPLS_LABEL_MASK) {
879 			NL_SET_ERR_MSG_ATTR(extack,
880 					    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
881 					    "Label must be between 0 and 1048575");
882 			return -EINVAL;
883 		}
884 		lse_val->mpls_label = label;
885 		lse_mask->mpls_label = MPLS_LABEL_MASK;
886 	}
887 
888 	return 0;
889 }
890 
891 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
892 				struct flow_dissector_key_mpls *key_val,
893 				struct flow_dissector_key_mpls *key_mask,
894 				struct netlink_ext_ack *extack)
895 {
896 	struct nlattr *nla_lse;
897 	int rem;
898 	int err;
899 
900 	if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
901 		NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
902 				    "NLA_F_NESTED is missing");
903 		return -EINVAL;
904 	}
905 
906 	nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
907 		if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
908 			NL_SET_ERR_MSG_ATTR(extack, nla_lse,
909 					    "Invalid MPLS option type");
910 			return -EINVAL;
911 		}
912 
913 		err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
914 		if (err < 0)
915 			return err;
916 	}
917 	if (rem) {
918 		NL_SET_ERR_MSG(extack,
919 			       "Bytes leftover after parsing MPLS options");
920 		return -EINVAL;
921 	}
922 
923 	return 0;
924 }
925 
926 static int fl_set_key_mpls(struct nlattr **tb,
927 			   struct flow_dissector_key_mpls *key_val,
928 			   struct flow_dissector_key_mpls *key_mask,
929 			   struct netlink_ext_ack *extack)
930 {
931 	struct flow_dissector_mpls_lse *lse_mask;
932 	struct flow_dissector_mpls_lse *lse_val;
933 
934 	if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
935 		if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
936 		    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
937 		    tb[TCA_FLOWER_KEY_MPLS_TC] ||
938 		    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
939 			NL_SET_ERR_MSG_ATTR(extack,
940 					    tb[TCA_FLOWER_KEY_MPLS_OPTS],
941 					    "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
942 			return -EBADMSG;
943 		}
944 
945 		return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
946 					    key_val, key_mask, extack);
947 	}
948 
949 	lse_val = &key_val->ls[0];
950 	lse_mask = &key_mask->ls[0];
951 
952 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
953 		lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
954 		lse_mask->mpls_ttl = MPLS_TTL_MASK;
955 		dissector_set_mpls_lse(key_val, 0);
956 		dissector_set_mpls_lse(key_mask, 0);
957 	}
958 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
959 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
960 
961 		if (bos & ~MPLS_BOS_MASK) {
962 			NL_SET_ERR_MSG_ATTR(extack,
963 					    tb[TCA_FLOWER_KEY_MPLS_BOS],
964 					    "Bottom Of Stack (BOS) must be 0 or 1");
965 			return -EINVAL;
966 		}
967 		lse_val->mpls_bos = bos;
968 		lse_mask->mpls_bos = MPLS_BOS_MASK;
969 		dissector_set_mpls_lse(key_val, 0);
970 		dissector_set_mpls_lse(key_mask, 0);
971 	}
972 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
973 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
974 
975 		if (tc & ~MPLS_TC_MASK) {
976 			NL_SET_ERR_MSG_ATTR(extack,
977 					    tb[TCA_FLOWER_KEY_MPLS_TC],
978 					    "Traffic Class (TC) must be between 0 and 7");
979 			return -EINVAL;
980 		}
981 		lse_val->mpls_tc = tc;
982 		lse_mask->mpls_tc = MPLS_TC_MASK;
983 		dissector_set_mpls_lse(key_val, 0);
984 		dissector_set_mpls_lse(key_mask, 0);
985 	}
986 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
987 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
988 
989 		if (label & ~MPLS_LABEL_MASK) {
990 			NL_SET_ERR_MSG_ATTR(extack,
991 					    tb[TCA_FLOWER_KEY_MPLS_LABEL],
992 					    "Label must be between 0 and 1048575");
993 			return -EINVAL;
994 		}
995 		lse_val->mpls_label = label;
996 		lse_mask->mpls_label = MPLS_LABEL_MASK;
997 		dissector_set_mpls_lse(key_val, 0);
998 		dissector_set_mpls_lse(key_mask, 0);
999 	}
1000 	return 0;
1001 }
1002 
1003 static void fl_set_key_vlan(struct nlattr **tb,
1004 			    __be16 ethertype,
1005 			    int vlan_id_key, int vlan_prio_key,
1006 			    struct flow_dissector_key_vlan *key_val,
1007 			    struct flow_dissector_key_vlan *key_mask)
1008 {
1009 #define VLAN_PRIORITY_MASK	0x7
1010 
1011 	if (tb[vlan_id_key]) {
1012 		key_val->vlan_id =
1013 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
1014 		key_mask->vlan_id = VLAN_VID_MASK;
1015 	}
1016 	if (tb[vlan_prio_key]) {
1017 		key_val->vlan_priority =
1018 			nla_get_u8(tb[vlan_prio_key]) &
1019 			VLAN_PRIORITY_MASK;
1020 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
1021 	}
1022 	key_val->vlan_tpid = ethertype;
1023 	key_mask->vlan_tpid = cpu_to_be16(~0);
1024 }
1025 
1026 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
1027 			    u32 *dissector_key, u32 *dissector_mask,
1028 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1029 {
1030 	if (flower_mask & flower_flag_bit) {
1031 		*dissector_mask |= dissector_flag_bit;
1032 		if (flower_key & flower_flag_bit)
1033 			*dissector_key |= dissector_flag_bit;
1034 	}
1035 }
1036 
1037 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
1038 			    u32 *flags_mask, struct netlink_ext_ack *extack)
1039 {
1040 	u32 key, mask;
1041 
1042 	/* mask is mandatory for flags */
1043 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
1044 		NL_SET_ERR_MSG(extack, "Missing flags mask");
1045 		return -EINVAL;
1046 	}
1047 
1048 	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
1049 	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
1050 
1051 	*flags_key  = 0;
1052 	*flags_mask = 0;
1053 
1054 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1055 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1056 	fl_set_key_flag(key, mask, flags_key, flags_mask,
1057 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
1058 			FLOW_DIS_FIRST_FRAG);
1059 
1060 	return 0;
1061 }
1062 
1063 static void fl_set_key_ip(struct nlattr **tb, bool encap,
1064 			  struct flow_dissector_key_ip *key,
1065 			  struct flow_dissector_key_ip *mask)
1066 {
1067 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1068 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1069 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1070 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1071 
1072 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
1073 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
1074 }
1075 
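/* Parse one GENEVE tunnel option into the enc_opts blob.  The function is
 * called once for the key and once for the mask; for the key, class, type
 * and data are mandatory, while the mask may omit them to mean "don't
 * care".
 */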
1076 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
1077 			     int depth, int option_len,
1078 			     struct netlink_ext_ack *extack)
1079 {
1080 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
1081 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
1082 	struct geneve_opt *opt;
1083 	int err, data_len = 0;
1084 
1085 	if (option_len > sizeof(struct geneve_opt))
1086 		data_len = option_len - sizeof(struct geneve_opt);
1087 
1088 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
1089 	memset(opt, 0xff, option_len);
1090 	opt->length = data_len / 4;
1091 	opt->r1 = 0;
1092 	opt->r2 = 0;
1093 	opt->r3 = 0;
1094 
1095 	/* If no mask has been provided we assume an exact match. */
1096 	if (!depth)
1097 		return sizeof(struct geneve_opt) + data_len;
1098 
1099 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
1100 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
1101 		return -EINVAL;
1102 	}
1103 
1104 	err = nla_parse_nested_deprecated(tb,
1105 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
1106 					  nla, geneve_opt_policy, extack);
1107 	if (err < 0)
1108 		return err;
1109 
1110 	/* We are not allowed to omit any of CLASS, TYPE or DATA
1111 	 * fields from the key.
1112 	 */
1113 	if (!option_len &&
1114 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
1115 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
1116 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
1117 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
1118 		return -EINVAL;
1119 	}
1120 
1121 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
1122 	 * for the mask.
1123 	 */
1124 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
1125 		int new_len = key->enc_opts.len;
1126 
1127 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
1128 		data_len = nla_len(data);
1129 		if (data_len < 4) {
1130 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
1131 			return -ERANGE;
1132 		}
1133 		if (data_len % 4) {
1134 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
1135 			return -ERANGE;
1136 		}
1137 
1138 		new_len += sizeof(struct geneve_opt) + data_len;
1139 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
1140 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
1141 			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
1142 			return -ERANGE;
1143 		}
1144 		opt->length = data_len / 4;
1145 		memcpy(opt->opt_data, nla_data(data), data_len);
1146 	}
1147 
1148 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
1149 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
1150 		opt->opt_class = nla_get_be16(class);
1151 	}
1152 
1153 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
1154 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
1155 		opt->type = nla_get_u8(type);
1156 	}
1157 
1158 	return sizeof(struct geneve_opt) + data_len;
1159 }
1160 
1161 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1162 			    int depth, int option_len,
1163 			    struct netlink_ext_ack *extack)
1164 {
1165 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
1166 	struct vxlan_metadata *md;
1167 	int err;
1168 
1169 	md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1170 	memset(md, 0xff, sizeof(*md));
1171 
1172 	if (!depth)
1173 		return sizeof(*md);
1174 
1175 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
1176 		NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
1177 		return -EINVAL;
1178 	}
1179 
1180 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
1181 			       vxlan_opt_policy, extack);
1182 	if (err < 0)
1183 		return err;
1184 
1185 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1186 		NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
1187 		return -EINVAL;
1188 	}
1189 
1190 	if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
1191 		md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
1192 		md->gbp &= VXLAN_GBP_MASK;
1193 	}
1194 
1195 	return sizeof(*md);
1196 }
1197 
1198 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
1199 			     int depth, int option_len,
1200 			     struct netlink_ext_ack *extack)
1201 {
1202 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
1203 	struct erspan_metadata *md;
1204 	int err;
1205 
1206 	md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
1207 	memset(md, 0xff, sizeof(*md));
1208 	md->version = 1;
1209 
1210 	if (!depth)
1211 		return sizeof(*md);
1212 
1213 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
1214 		NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
1215 		return -EINVAL;
1216 	}
1217 
1218 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
1219 			       erspan_opt_policy, extack);
1220 	if (err < 0)
1221 		return err;
1222 
1223 	if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
1224 		NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
1225 		return -EINVAL;
1226 	}
1227 
1228 	if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
1229 		md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);
1230 
1231 	if (md->version == 1) {
1232 		if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1233 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
1234 			return -EINVAL;
1235 		}
1236 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
1237 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
1238 			memset(&md->u, 0x00, sizeof(md->u));
1239 			md->u.index = nla_get_be32(nla);
1240 		}
1241 	} else if (md->version == 2) {
1242 		if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
1243 				    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
1244 			NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
1245 			return -EINVAL;
1246 		}
1247 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
1248 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
1249 			md->u.md2.dir = nla_get_u8(nla);
1250 		}
1251 		if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
1252 			nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
1253 			set_hwid(&md->u.md2, nla_get_u8(nla));
1254 		}
1255 	} else {
1256 		NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
1257 		return -EINVAL;
1258 	}
1259 
1260 	return sizeof(*md);
1261 }
1262 
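/* Walk the nested TCA_FLOWER_KEY_ENC_OPTS(_MASK) attributes and dispatch
 * to the per-tunnel-type option parsers, keeping the key and mask option
 * lengths in lockstep.
 */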
1263 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
1264 			  struct fl_flow_key *mask,
1265 			  struct netlink_ext_ack *extack)
1266 {
1267 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
1268 	int err, option_len, key_depth, msk_depth = 0;
1269 
1270 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
1271 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1272 					     enc_opts_policy, extack);
1273 	if (err)
1274 		return err;
1275 
1276 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
1277 
1278 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
1279 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
1280 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
1281 						     enc_opts_policy, extack);
1282 		if (err)
1283 			return err;
1284 
1285 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1286 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
1287 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1288 			NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
1289 			return -EINVAL;
1290 		}
1291 	}
1292 
1293 	nla_for_each_attr(nla_opt_key, nla_enc_key,
1294 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
1295 		switch (nla_type(nla_opt_key)) {
1296 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
1297 			if (key->enc_opts.dst_opt_type &&
1298 			    key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
1299 				NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
1300 				return -EINVAL;
1301 			}
1302 			option_len = 0;
1303 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1304 			option_len = fl_set_geneve_opt(nla_opt_key, key,
1305 						       key_depth, option_len,
1306 						       extack);
1307 			if (option_len < 0)
1308 				return option_len;
1309 
1310 			key->enc_opts.len += option_len;
1311 			/* At the same time parse through the mask to verify
1312 			 * that the key and mask attribute lengths match.
1313 			 */
1314 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
1315 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
1316 						       msk_depth, option_len,
1317 						       extack);
1318 			if (option_len < 0)
1319 				return option_len;
1320 
1321 			mask->enc_opts.len += option_len;
1322 			if (key->enc_opts.len != mask->enc_opts.len) {
1323 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1324 				return -EINVAL;
1325 			}
1326 			break;
1327 		case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
1328 			if (key->enc_opts.dst_opt_type) {
1329 				NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
1330 				return -EINVAL;
1331 			}
1332 			option_len = 0;
1333 			key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1334 			option_len = fl_set_vxlan_opt(nla_opt_key, key,
1335 						      key_depth, option_len,
1336 						      extack);
1337 			if (option_len < 0)
1338 				return option_len;
1339 
1340 			key->enc_opts.len += option_len;
1341 			/* At the same time parse through the mask to verify
1342 			 * that the key and mask attribute lengths match.
1343 			 */
1344 			mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
1345 			option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
1346 						      msk_depth, option_len,
1347 						      extack);
1348 			if (option_len < 0)
1349 				return option_len;
1350 
1351 			mask->enc_opts.len += option_len;
1352 			if (key->enc_opts.len != mask->enc_opts.len) {
1353 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1354 				return -EINVAL;
1355 			}
1356 			break;
1357 		case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
1358 			if (key->enc_opts.dst_opt_type) {
1359 				NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
1360 				return -EINVAL;
1361 			}
1362 			option_len = 0;
1363 			key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1364 			option_len = fl_set_erspan_opt(nla_opt_key, key,
1365 						       key_depth, option_len,
1366 						       extack);
1367 			if (option_len < 0)
1368 				return option_len;
1369 
1370 			key->enc_opts.len += option_len;
1371 			/* At the same time parse through the mask to verify
1372 			 * that the key and mask attribute lengths match.
1373 			 */
1374 			mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
1375 			option_len = fl_set_erspan_opt(nla_opt_msk, mask,
1376 						       msk_depth, option_len,
1377 						       extack);
1378 			if (option_len < 0)
1379 				return option_len;
1380 
1381 			mask->enc_opts.len += option_len;
1382 			if (key->enc_opts.len != mask->enc_opts.len) {
1383 				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
1384 				return -EINVAL;
1385 			}
1386 			break;
1387 		default:
1388 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1389 			return -EINVAL;
1390 		}
1391 
1392 		if (!msk_depth)
1393 			continue;
1394 
1395 		if (!nla_ok(nla_opt_msk, msk_depth)) {
1396 			NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
1397 			return -EINVAL;
1398 		}
1399 		nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1400 	}
1401 
1402 	return 0;
1403 }
1404 
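/* Reject conntrack state combinations that can never occur, e.g. any flag
 * without "trk", or "new" together with "est" or "rpl".
 */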
1405 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1406 				struct netlink_ext_ack *extack)
1407 {
1408 	if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1409 		NL_SET_ERR_MSG_ATTR(extack, tb,
1410 				    "no trk, so no other flag can be set");
1411 		return -EINVAL;
1412 	}
1413 
1414 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1415 	    state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1416 		NL_SET_ERR_MSG_ATTR(extack, tb,
1417 				    "new and est are mutually exclusive");
1418 		return -EINVAL;
1419 	}
1420 
1421 	if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1422 	    state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1423 		      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1424 		NL_SET_ERR_MSG_ATTR(extack, tb,
1425 				    "when inv is set, only trk may be set");
1426 		return -EINVAL;
1427 	}
1428 
1429 	if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1430 	    state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1431 		NL_SET_ERR_MSG_ATTR(extack, tb,
1432 				    "new and rpl are mutually exclusive");
1433 		return -EINVAL;
1434 	}
1435 
1436 	return 0;
1437 }
1438 
1439 static int fl_set_key_ct(struct nlattr **tb,
1440 			 struct flow_dissector_key_ct *key,
1441 			 struct flow_dissector_key_ct *mask,
1442 			 struct netlink_ext_ack *extack)
1443 {
1444 	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1445 		int err;
1446 
1447 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1448 			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1449 			return -EOPNOTSUPP;
1450 		}
1451 		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1452 			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1453 			       sizeof(key->ct_state));
1454 
1455 		err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1456 					   tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1457 					   extack);
1458 		if (err)
1459 			return err;
1460 
1461 	}
1462 	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1463 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1464 			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
1465 			return -EOPNOTSUPP;
1466 		}
1467 		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1468 			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1469 			       sizeof(key->ct_zone));
1470 	}
1471 	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1472 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1473 			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1474 			return -EOPNOTSUPP;
1475 		}
1476 		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1477 			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1478 			       sizeof(key->ct_mark));
1479 	}
1480 	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1481 		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1482 			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1483 			return -EOPNOTSUPP;
1484 		}
1485 		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1486 			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1487 			       sizeof(key->ct_labels));
1488 	}
1489 
1490 	return 0;
1491 }
1492 
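/* Translate all TCA_FLOWER_KEY_* attributes supplied by user space into
 * the flow dissector key/mask pair that is used for matching.
 */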
1493 static int fl_set_key(struct net *net, struct nlattr **tb,
1494 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1495 		      struct netlink_ext_ack *extack)
1496 {
1497 	__be16 ethertype;
1498 	int ret = 0;
1499 
1500 	if (tb[TCA_FLOWER_INDEV]) {
1501 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1502 		if (err < 0)
1503 			return err;
1504 		key->meta.ingress_ifindex = err;
1505 		mask->meta.ingress_ifindex = 0xffffffff;
1506 	}
1507 
1508 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1509 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1510 		       sizeof(key->eth.dst));
1511 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1512 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1513 		       sizeof(key->eth.src));
1514 
1515 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1516 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1517 
1518 		if (eth_type_vlan(ethertype)) {
1519 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1520 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1521 					&mask->vlan);
1522 
1523 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1524 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1525 				if (eth_type_vlan(ethertype)) {
1526 					fl_set_key_vlan(tb, ethertype,
1527 							TCA_FLOWER_KEY_CVLAN_ID,
1528 							TCA_FLOWER_KEY_CVLAN_PRIO,
1529 							&key->cvlan, &mask->cvlan);
1530 					fl_set_key_val(tb, &key->basic.n_proto,
1531 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1532 						       &mask->basic.n_proto,
1533 						       TCA_FLOWER_UNSPEC,
1534 						       sizeof(key->basic.n_proto));
1535 				} else {
1536 					key->basic.n_proto = ethertype;
1537 					mask->basic.n_proto = cpu_to_be16(~0);
1538 				}
1539 			}
1540 		} else {
1541 			key->basic.n_proto = ethertype;
1542 			mask->basic.n_proto = cpu_to_be16(~0);
1543 		}
1544 	}
1545 
1546 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1547 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1548 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1549 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1550 			       sizeof(key->basic.ip_proto));
1551 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1552 	}
1553 
1554 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1555 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1556 		mask->control.addr_type = ~0;
1557 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1558 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1559 			       sizeof(key->ipv4.src));
1560 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1561 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1562 			       sizeof(key->ipv4.dst));
1563 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1564 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1565 		mask->control.addr_type = ~0;
1566 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1567 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1568 			       sizeof(key->ipv6.src));
1569 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1570 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1571 			       sizeof(key->ipv6.dst));
1572 	}
1573 
1574 	if (key->basic.ip_proto == IPPROTO_TCP) {
1575 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1576 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1577 			       sizeof(key->tp.src));
1578 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1579 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1580 			       sizeof(key->tp.dst));
1581 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1582 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1583 			       sizeof(key->tcp.flags));
1584 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1585 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1586 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1587 			       sizeof(key->tp.src));
1588 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1589 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1590 			       sizeof(key->tp.dst));
1591 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1592 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1593 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1594 			       sizeof(key->tp.src));
1595 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1596 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1597 			       sizeof(key->tp.dst));
1598 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1599 		   key->basic.ip_proto == IPPROTO_ICMP) {
1600 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1601 			       &mask->icmp.type,
1602 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1603 			       sizeof(key->icmp.type));
1604 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1605 			       &mask->icmp.code,
1606 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1607 			       sizeof(key->icmp.code));
1608 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1609 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1610 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1611 			       &mask->icmp.type,
1612 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1613 			       sizeof(key->icmp.type));
1614 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1615 			       &mask->icmp.code,
1616 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1617 			       sizeof(key->icmp.code));
1618 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1619 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1620 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1621 		if (ret)
1622 			return ret;
1623 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1624 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1625 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1626 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1627 			       sizeof(key->arp.sip));
1628 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1629 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1630 			       sizeof(key->arp.tip));
1631 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1632 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1633 			       sizeof(key->arp.op));
1634 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1635 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1636 			       sizeof(key->arp.sha));
1637 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1638 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1639 			       sizeof(key->arp.tha));
1640 	}
1641 
1642 	if (key->basic.ip_proto == IPPROTO_TCP ||
1643 	    key->basic.ip_proto == IPPROTO_UDP ||
1644 	    key->basic.ip_proto == IPPROTO_SCTP) {
1645 		ret = fl_set_key_port_range(tb, key, mask, extack);
1646 		if (ret)
1647 			return ret;
1648 	}
1649 
1650 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1651 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1652 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1653 		mask->enc_control.addr_type = ~0;
1654 		fl_set_key_val(tb, &key->enc_ipv4.src,
1655 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1656 			       &mask->enc_ipv4.src,
1657 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1658 			       sizeof(key->enc_ipv4.src));
1659 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1660 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1661 			       &mask->enc_ipv4.dst,
1662 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1663 			       sizeof(key->enc_ipv4.dst));
1664 	}
1665 
1666 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1667 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1668 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1669 		mask->enc_control.addr_type = ~0;
1670 		fl_set_key_val(tb, &key->enc_ipv6.src,
1671 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1672 			       &mask->enc_ipv6.src,
1673 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1674 			       sizeof(key->enc_ipv6.src));
1675 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1676 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1677 			       &mask->enc_ipv6.dst,
1678 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1679 			       sizeof(key->enc_ipv6.dst));
1680 	}
1681 
1682 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1683 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1684 		       sizeof(key->enc_key_id.keyid));
1685 
1686 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1687 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1688 		       sizeof(key->enc_tp.src));
1689 
1690 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1691 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1692 		       sizeof(key->enc_tp.dst));
1693 
1694 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1695 
1696 	fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1697 		       &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1698 		       sizeof(key->hash.hash));
1699 
1700 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1701 		ret = fl_set_enc_opt(tb, key, mask, extack);
1702 		if (ret)
1703 			return ret;
1704 	}
1705 
1706 	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1707 	if (ret)
1708 		return ret;
1709 
1710 	if (tb[TCA_FLOWER_KEY_FLAGS])
1711 		ret = fl_set_key_flags(tb, &key->control.flags,
1712 				       &mask->control.flags, extack);
1713 
1714 	return ret;
1715 }
1716 
1717 static void fl_mask_copy(struct fl_flow_mask *dst,
1718 			 struct fl_flow_mask *src)
1719 {
1720 	const void *psrc = fl_key_get_start(&src->key, src);
1721 	void *pdst = fl_key_get_start(&dst->key, src);
1722 
1723 	memcpy(pdst, psrc, fl_mask_range(src));
1724 	dst->range = src->range;
1725 }
1726 
1727 static const struct rhashtable_params fl_ht_params = {
1728 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1729 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1730 	.automatic_shrinking = true,
1731 };
1732 
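/* Set up the per-mask filter hash table. Filters sharing this mask are
 * keyed only on the byte span of the masked key that the mask actually
 * covers: the base key_offset from fl_ht_params is shifted by the start
 * of the mask range and key_len is set to the length of that range
 * (assuming fl_mask_range() returns the range length in bytes), so hash
 * and compare touch only the relevant part of each filter's mkey.
 */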
1733 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1734 {
1735 	mask->filter_ht_params = fl_ht_params;
1736 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1737 	mask->filter_ht_params.key_offset += mask->range.start;
1738 
1739 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1740 }
1741 
1742 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1743 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
1744 
1745 #define FL_KEY_IS_MASKED(mask, member)						\
1746 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1747 		   0, FL_KEY_MEMBER_SIZE(member))				\
1748 
1749 #define FL_KEY_SET(keys, cnt, id, member)					\
1750 	do {									\
1751 		keys[cnt].key_id = id;						\
1752 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1753 		cnt++;								\
1754 	} while (0)
1755 
1756 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1757 	do {									\
1758 		if (FL_KEY_IS_MASKED(mask, member))				\
1759 			FL_KEY_SET(keys, cnt, id, member);			\
1760 	} while (0)
1761 
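/* Build the flow dissector used to extract keys for this mask. The
 * control and basic keys are always present; every other key is added
 * only if at least one of its bytes is non-zero in the mask
 * (FL_KEY_IS_MASKED uses memchr_inv), so dissection is limited to the
 * fields the filters actually match on. ENC_CONTROL is added whenever
 * either tunnel address key is masked.
 */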
1762 static void fl_init_dissector(struct flow_dissector *dissector,
1763 			      struct fl_flow_key *mask)
1764 {
1765 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1766 	size_t cnt = 0;
1767 
1768 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1769 			     FLOW_DISSECTOR_KEY_META, meta);
1770 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1771 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1772 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1773 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1774 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1775 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1776 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1777 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1778 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1779 			     FLOW_DISSECTOR_KEY_PORTS, tp);
1780 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1781 			     FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
1782 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1783 			     FLOW_DISSECTOR_KEY_IP, ip);
1784 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1785 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1786 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1787 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1788 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1789 			     FLOW_DISSECTOR_KEY_ARP, arp);
1790 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1791 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1792 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1793 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1794 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1795 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1796 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1797 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1798 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1799 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1800 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1801 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1802 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1803 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1804 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1805 			   enc_control);
1806 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1807 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1808 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1809 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1810 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1811 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1812 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1813 			     FLOW_DISSECTOR_KEY_CT, ct);
1814 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1815 			     FLOW_DISSECTOR_KEY_HASH, hash);
1816 
1817 	skb_flow_dissector_init(dissector, keys, cnt);
1818 }
1819 
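/* Turn the temporary mask inserted by fl_check_assign_mask() into a
 * fully initialized one: copy the masked region, flag port-range
 * matching when both ends of a source or destination range are masked,
 * set up the per-mask filter hash table and dissector, and replace the
 * temporary node in head->ht with the new mask.
 */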
1820 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1821 					       struct fl_flow_mask *mask)
1822 {
1823 	struct fl_flow_mask *newmask;
1824 	int err;
1825 
1826 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1827 	if (!newmask)
1828 		return ERR_PTR(-ENOMEM);
1829 
1830 	fl_mask_copy(newmask, mask);
1831 
1832 	if ((newmask->key.tp_range.tp_min.dst &&
1833 	     newmask->key.tp_range.tp_max.dst) ||
1834 	    (newmask->key.tp_range.tp_min.src &&
1835 	     newmask->key.tp_range.tp_max.src))
1836 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1837 
1838 	err = fl_init_mask_hashtable(newmask);
1839 	if (err)
1840 		goto errout_free;
1841 
1842 	fl_init_dissector(&newmask->dissector, &newmask->key);
1843 
1844 	INIT_LIST_HEAD_RCU(&newmask->filters);
1845 
1846 	refcount_set(&newmask->refcnt, 1);
1847 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1848 				      &newmask->ht_node, mask_ht_params);
1849 	if (err)
1850 		goto errout_destroy;
1851 
1852 	spin_lock(&head->masks_lock);
1853 	list_add_tail_rcu(&newmask->list, &head->masks);
1854 	spin_unlock(&head->masks_lock);
1855 
1856 	return newmask;
1857 
1858 errout_destroy:
1859 	rhashtable_destroy(&newmask->ht);
1860 errout_free:
1861 	kfree(newmask);
1862 
1863 	return ERR_PTR(err);
1864 }
1865 
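/* Look up an existing mask identical to @mask or create a new one for
 * @fnew. The caller-provided mask is first inserted as a placeholder so
 * that concurrent callers with the same key either reuse it or back off
 * with -EAGAIN while its refcnt is still zero. Changing the mask of an
 * existing filter (@fold set) is rejected with -EINVAL.
 */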
1866 static int fl_check_assign_mask(struct cls_fl_head *head,
1867 				struct cls_fl_filter *fnew,
1868 				struct cls_fl_filter *fold,
1869 				struct fl_flow_mask *mask)
1870 {
1871 	struct fl_flow_mask *newmask;
1872 	int ret = 0;
1873 
1874 	rcu_read_lock();
1875 
1876 	/* Insert mask as a temporary node to prevent concurrent creation of
1877 	 * a mask with the same key. Any concurrent lookup with the same key
1878 	 * will return -EAGAIN because the mask's refcnt is still zero.
1879 	 */
1880 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1881 						       &mask->ht_node,
1882 						       mask_ht_params);
1883 	if (!fnew->mask) {
1884 		rcu_read_unlock();
1885 
1886 		if (fold) {
1887 			ret = -EINVAL;
1888 			goto errout_cleanup;
1889 		}
1890 
1891 		newmask = fl_create_new_mask(head, mask);
1892 		if (IS_ERR(newmask)) {
1893 			ret = PTR_ERR(newmask);
1894 			goto errout_cleanup;
1895 		}
1896 
1897 		fnew->mask = newmask;
1898 		return 0;
1899 	} else if (IS_ERR(fnew->mask)) {
1900 		ret = PTR_ERR(fnew->mask);
1901 	} else if (fold && fold->mask != fnew->mask) {
1902 		ret = -EINVAL;
1903 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1904 		/* Mask was deleted concurrently, try again */
1905 		ret = -EAGAIN;
1906 	}
1907 	rcu_read_unlock();
1908 	return ret;
1909 
1910 errout_cleanup:
1911 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1912 			       mask_ht_params);
1913 	return ret;
1914 }
1915 
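/* Validate the actions, bind the class, parse key and mask from the
 * netlink attributes and derive the masked key. The rtnl lock is taken
 * around tcf_bind_filter() only when the caller runs unlocked
 * (TCA_ACT_FLAGS_NO_RTNL), and the resulting mask must fit the chain
 * template if one was created.
 */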
1916 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1917 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1918 			unsigned long base, struct nlattr **tb,
1919 			struct nlattr *est,
1920 			struct fl_flow_tmplt *tmplt, u32 flags,
1921 			struct netlink_ext_ack *extack)
1922 {
1923 	int err;
1924 
1925 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
1926 	if (err < 0)
1927 		return err;
1928 
1929 	if (tb[TCA_FLOWER_CLASSID]) {
1930 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1931 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
1932 			rtnl_lock();
1933 		tcf_bind_filter(tp, &f->res, base);
1934 		if (flags & TCA_ACT_FLAGS_NO_RTNL)
1935 			rtnl_unlock();
1936 	}
1937 
1938 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1939 	if (err)
1940 		return err;
1941 
1942 	fl_mask_update_range(mask);
1943 	fl_set_masked_key(&f->mkey, &f->key, mask);
1944 
1945 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1946 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1947 		return -EINVAL;
1948 	}
1949 
1950 	return 0;
1951 }
1952 
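/* Insert the new filter into its mask's hash table. A duplicate masked
 * key (-EEXIST) is tolerated only when an old filter is being
 * overwritten; *in_ht tells the caller whether the insert happened so
 * the error path knows what to undo.
 */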
1953 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1954 			       struct cls_fl_filter *fold,
1955 			       bool *in_ht)
1956 {
1957 	struct fl_flow_mask *mask = fnew->mask;
1958 	int err;
1959 
1960 	err = rhashtable_lookup_insert_fast(&mask->ht,
1961 					    &fnew->ht_node,
1962 					    mask->filter_ht_params);
1963 	if (err) {
1964 		*in_ht = false;
1965 		/* It is okay if a filter with the same key already exists
1966 		 * when overwriting.
1967 		 */
1968 		return fold && err == -EEXIST ? 0 : err;
1969 	}
1970 
1971 	*in_ht = true;
1972 	return 0;
1973 }
1974 
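/* Create or update a flower filter. Userspace reaches this through the
 * generic cls_api netlink path, for example roughly (assumed iproute2
 * syntax, shown only for illustration):
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * where each flower option maps to a TCA_FLOWER_* attribute inside
 * tca[TCA_OPTIONS]. The function parses the attributes, builds key and
 * mask, finds or creates the shared mask, inserts the filter into the
 * per-mask hash table, offloads it unless skip_hw was requested, and
 * finally, under tp->lock, either replaces the old filter or allocates
 * a new handle. -EAGAIN is returned when a concurrent delete is
 * detected so that cls_api can retry.
 */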
1975 static int fl_change(struct net *net, struct sk_buff *in_skb,
1976 		     struct tcf_proto *tp, unsigned long base,
1977 		     u32 handle, struct nlattr **tca,
1978 		     void **arg, u32 flags,
1979 		     struct netlink_ext_ack *extack)
1980 {
1981 	struct cls_fl_head *head = fl_head_dereference(tp);
1982 	bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
1983 	struct cls_fl_filter *fold = *arg;
1984 	struct cls_fl_filter *fnew;
1985 	struct fl_flow_mask *mask;
1986 	struct nlattr **tb;
1987 	bool in_ht;
1988 	int err;
1989 
1990 	if (!tca[TCA_OPTIONS]) {
1991 		err = -EINVAL;
1992 		goto errout_fold;
1993 	}
1994 
1995 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1996 	if (!mask) {
1997 		err = -ENOBUFS;
1998 		goto errout_fold;
1999 	}
2000 
2001 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2002 	if (!tb) {
2003 		err = -ENOBUFS;
2004 		goto errout_mask_alloc;
2005 	}
2006 
2007 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2008 					  tca[TCA_OPTIONS], fl_policy, NULL);
2009 	if (err < 0)
2010 		goto errout_tb;
2011 
2012 	if (fold && handle && fold->handle != handle) {
2013 		err = -EINVAL;
2014 		goto errout_tb;
2015 	}
2016 
2017 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2018 	if (!fnew) {
2019 		err = -ENOBUFS;
2020 		goto errout_tb;
2021 	}
2022 	INIT_LIST_HEAD(&fnew->hw_list);
2023 	refcount_set(&fnew->refcnt, 1);
2024 
2025 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
2026 	if (err < 0)
2027 		goto errout;
2028 
2029 	if (tb[TCA_FLOWER_FLAGS]) {
2030 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2031 
2032 		if (!tc_flags_valid(fnew->flags)) {
2033 			err = -EINVAL;
2034 			goto errout;
2035 		}
2036 	}
2037 
2038 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
2039 			   tp->chain->tmplt_priv, flags, extack);
2040 	if (err)
2041 		goto errout;
2042 
2043 	err = fl_check_assign_mask(head, fnew, fold, mask);
2044 	if (err)
2045 		goto errout;
2046 
2047 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
2048 	if (err)
2049 		goto errout_mask;
2050 
2051 	if (!tc_skip_hw(fnew->flags)) {
2052 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2053 		if (err)
2054 			goto errout_ht;
2055 	}
2056 
2057 	if (!tc_in_hw(fnew->flags))
2058 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2059 
2060 	spin_lock(&tp->lock);
2061 
2062 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look
2063 	 * up the proto again or create a new one, if necessary.
2064 	 */
2065 	if (tp->deleting) {
2066 		err = -EAGAIN;
2067 		goto errout_hw;
2068 	}
2069 
2070 	if (fold) {
2071 		/* Fold filter was deleted concurrently. Retry lookup. */
2072 		if (fold->deleted) {
2073 			err = -EAGAIN;
2074 			goto errout_hw;
2075 		}
2076 
2077 		fnew->handle = handle;
2078 
2079 		if (!in_ht) {
2080 			struct rhashtable_params params =
2081 				fnew->mask->filter_ht_params;
2082 
2083 			err = rhashtable_insert_fast(&fnew->mask->ht,
2084 						     &fnew->ht_node,
2085 						     params);
2086 			if (err)
2087 				goto errout_hw;
2088 			in_ht = true;
2089 		}
2090 
2091 		refcount_inc(&fnew->refcnt);
2092 		rhashtable_remove_fast(&fold->mask->ht,
2093 				       &fold->ht_node,
2094 				       fold->mask->filter_ht_params);
2095 		idr_replace(&head->handle_idr, fnew, fnew->handle);
2096 		list_replace_rcu(&fold->list, &fnew->list);
2097 		fold->deleted = true;
2098 
2099 		spin_unlock(&tp->lock);
2100 
2101 		fl_mask_put(head, fold->mask);
2102 		if (!tc_skip_hw(fold->flags))
2103 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2104 		tcf_unbind_filter(tp, &fold->res);
2105 		/* Caller holds reference to fold, so refcnt is always > 0
2106 		 * after this.
2107 		 */
2108 		refcount_dec(&fold->refcnt);
2109 		__fl_put(fold);
2110 	} else {
2111 		if (handle) {
2112 			/* user specifies a handle and it doesn't exist */
2113 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2114 					    handle, GFP_ATOMIC);
2115 
2116 			/* Filter with specified handle was concurrently
2117 			 * inserted after initial check in cls_api. This is not
2118 			 * necessarily an error if NLM_F_EXCL is not set in
2119 			 * message flags. Returning EAGAIN will cause cls_api to
2120 			 * try to update concurrently inserted rule.
2121 			 */
2122 			if (err == -ENOSPC)
2123 				err = -EAGAIN;
2124 		} else {
2125 			handle = 1;
2126 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
2127 					    INT_MAX, GFP_ATOMIC);
2128 		}
2129 		if (err)
2130 			goto errout_hw;
2131 
2132 		refcount_inc(&fnew->refcnt);
2133 		fnew->handle = handle;
2134 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2135 		spin_unlock(&tp->lock);
2136 	}
2137 
2138 	*arg = fnew;
2139 
2140 	kfree(tb);
2141 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2142 	return 0;
2143 
2144 errout_ht:
2145 	spin_lock(&tp->lock);
2146 errout_hw:
2147 	fnew->deleted = true;
2148 	spin_unlock(&tp->lock);
2149 	if (!tc_skip_hw(fnew->flags))
2150 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2151 	if (in_ht)
2152 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2153 				       fnew->mask->filter_ht_params);
2154 errout_mask:
2155 	fl_mask_put(head, fnew->mask);
2156 errout:
2157 	__fl_put(fnew);
2158 errout_tb:
2159 	kfree(tb);
2160 errout_mask_alloc:
2161 	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2162 errout_fold:
2163 	if (fold)
2164 		__fl_put(fold);
2165 	return err;
2166 }
2167 
2168 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2169 		     bool rtnl_held, struct netlink_ext_ack *extack)
2170 {
2171 	struct cls_fl_head *head = fl_head_dereference(tp);
2172 	struct cls_fl_filter *f = arg;
2173 	bool last_on_mask;
2174 	int err = 0;
2175 
2176 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2177 	*last = list_empty(&head->masks);
2178 	__fl_put(f);
2179 
2180 	return err;
2181 }
2182 
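/* Iterate all filters for a walk/dump. The handle IDR is traversed
 * under RCU; a reference is taken on each filter before the RCU lock is
 * dropped around the callback, and arg->cookie records where to resume.
 */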
2183 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2184 		    bool rtnl_held)
2185 {
2186 	struct cls_fl_head *head = fl_head_dereference(tp);
2187 	unsigned long id = arg->cookie, tmp;
2188 	struct cls_fl_filter *f;
2189 
2190 	arg->count = arg->skip;
2191 
2192 	rcu_read_lock();
2193 	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2194 		/* don't return filters that are being deleted */
2195 		if (!refcount_inc_not_zero(&f->refcnt))
2196 			continue;
2197 		rcu_read_unlock();
2198 
2199 		if (arg->fn(tp, f, arg) < 0) {
2200 			__fl_put(f);
2201 			arg->stop = 1;
2202 			rcu_read_lock();
2203 			break;
2204 		}
2205 		__fl_put(f);
2206 		arg->count++;
2207 		rcu_read_lock();
2208 	}
2209 	rcu_read_unlock();
2210 	arg->cookie = id;
2211 }
2212 
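/* Return the next filter on head->hw_filters after @f with a reference
 * taken, or NULL when the list is exhausted. When re-adding rules
 * (@add true) filters already marked deleted are skipped.
 */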
2213 static struct cls_fl_filter *
2214 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2215 {
2216 	struct cls_fl_head *head = fl_head_dereference(tp);
2217 
2218 	spin_lock(&tp->lock);
2219 	if (list_empty(&head->hw_filters)) {
2220 		spin_unlock(&tp->lock);
2221 		return NULL;
2222 	}
2223 
2224 	if (!f)
2225 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
2226 			       hw_list);
2227 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2228 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2229 			spin_unlock(&tp->lock);
2230 			return f;
2231 		}
2232 	}
2233 
2234 	spin_unlock(&tp->lock);
2235 	return NULL;
2236 }
2237 
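/* Replay every hardware-offloaded filter through @cb, either installing
 * (FLOW_CLS_REPLACE) or removing (FLOW_CLS_DESTROY) it depending on
 * @add. Failing to translate a filter's actions is fatal only for
 * skip_sw filters, which cannot fall back to software; errors from the
 * offload callback itself abort the replay.
 */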
2238 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2239 			void *cb_priv, struct netlink_ext_ack *extack)
2240 {
2241 	struct tcf_block *block = tp->chain->block;
2242 	struct flow_cls_offload cls_flower = {};
2243 	struct cls_fl_filter *f = NULL;
2244 	int err;
2245 
2246 	/* hw_filters list can only be changed by hw offload functions after
2247 	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2248 	 * iterating it.
2249 	 */
2250 	ASSERT_RTNL();
2251 
2252 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
2253 		cls_flower.rule =
2254 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2255 		if (!cls_flower.rule) {
2256 			__fl_put(f);
2257 			return -ENOMEM;
2258 		}
2259 
2260 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2261 					   extack);
2262 		cls_flower.command = add ?
2263 			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2264 		cls_flower.cookie = (unsigned long)f;
2265 		cls_flower.rule->match.dissector = &f->mask->dissector;
2266 		cls_flower.rule->match.mask = &f->mask->key;
2267 		cls_flower.rule->match.key = &f->mkey;
2268 
2269 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
2270 		if (err) {
2271 			kfree(cls_flower.rule);
2272 			if (tc_skip_sw(f->flags)) {
2273 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
2274 				__fl_put(f);
2275 				return err;
2276 			}
2277 			goto next_flow;
2278 		}
2279 
2280 		cls_flower.classid = f->res.classid;
2281 
2282 		err = tc_setup_cb_reoffload(block, tp, add, cb,
2283 					    TC_SETUP_CLSFLOWER, &cls_flower,
2284 					    cb_priv, &f->flags,
2285 					    &f->in_hw_count);
2286 		tc_cleanup_flow_action(&cls_flower.rule->action);
2287 		kfree(cls_flower.rule);
2288 
2289 		if (err) {
2290 			__fl_put(f);
2291 			return err;
2292 		}
2293 next_flow:
2294 		__fl_put(f);
2295 	}
2296 
2297 	return 0;
2298 }
2299 
2300 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2301 {
2302 	struct flow_cls_offload *cls_flower = type_data;
2303 	struct cls_fl_filter *f =
2304 		(struct cls_fl_filter *) cls_flower->cookie;
2305 	struct cls_fl_head *head = fl_head_dereference(tp);
2306 
2307 	spin_lock(&tp->lock);
2308 	list_add(&f->hw_list, &head->hw_filters);
2309 	spin_unlock(&tp->lock);
2310 }
2311 
2312 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2313 {
2314 	struct flow_cls_offload *cls_flower = type_data;
2315 	struct cls_fl_filter *f =
2316 		(struct cls_fl_filter *) cls_flower->cookie;
2317 
2318 	spin_lock(&tp->lock);
2319 	if (!list_empty(&f->hw_list))
2320 		list_del_init(&f->hw_list);
2321 	spin_unlock(&tp->lock);
2322 }
2323 
2324 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2325 			      struct fl_flow_tmplt *tmplt)
2326 {
2327 	struct flow_cls_offload cls_flower = {};
2328 	struct tcf_block *block = chain->block;
2329 
2330 	cls_flower.rule = flow_rule_alloc(0);
2331 	if (!cls_flower.rule)
2332 		return -ENOMEM;
2333 
2334 	cls_flower.common.chain_index = chain->index;
2335 	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2336 	cls_flower.cookie = (unsigned long) tmplt;
2337 	cls_flower.rule->match.dissector = &tmplt->dissector;
2338 	cls_flower.rule->match.mask = &tmplt->mask;
2339 	cls_flower.rule->match.key = &tmplt->dummy_key;
2340 
2341 	/* We don't care if any of the drivers fails to handle this
2342 	 * call. It serves only as a hint for them.
2343 	 */
2344 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2345 	kfree(cls_flower.rule);
2346 
2347 	return 0;
2348 }
2349 
2350 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2351 				struct fl_flow_tmplt *tmplt)
2352 {
2353 	struct flow_cls_offload cls_flower = {};
2354 	struct tcf_block *block = chain->block;
2355 
2356 	cls_flower.common.chain_index = chain->index;
2357 	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2358 	cls_flower.cookie = (unsigned long) tmplt;
2359 
2360 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2361 }
2362 
2363 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2364 			     struct nlattr **tca,
2365 			     struct netlink_ext_ack *extack)
2366 {
2367 	struct fl_flow_tmplt *tmplt;
2368 	struct nlattr **tb;
2369 	int err;
2370 
2371 	if (!tca[TCA_OPTIONS])
2372 		return ERR_PTR(-EINVAL);
2373 
2374 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2375 	if (!tb)
2376 		return ERR_PTR(-ENOBUFS);
2377 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2378 					  tca[TCA_OPTIONS], fl_policy, NULL);
2379 	if (err)
2380 		goto errout_tb;
2381 
2382 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2383 	if (!tmplt) {
2384 		err = -ENOMEM;
2385 		goto errout_tb;
2386 	}
2387 	tmplt->chain = chain;
2388 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2389 	if (err)
2390 		goto errout_tmplt;
2391 
2392 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2393 
2394 	err = fl_hw_create_tmplt(chain, tmplt);
2395 	if (err)
2396 		goto errout_tmplt;
2397 
2398 	kfree(tb);
2399 	return tmplt;
2400 
2401 errout_tmplt:
2402 	kfree(tmplt);
2403 errout_tb:
2404 	kfree(tb);
2405 	return ERR_PTR(err);
2406 }
2407 
2408 static void fl_tmplt_destroy(void *tmplt_priv)
2409 {
2410 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2411 
2412 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2413 	kfree(tmplt);
2414 }
2415 
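/* Dump one key/mask pair. Nothing is emitted when the mask is all
 * zeroes, and the mask attribute is omitted for fields whose mask type
 * is TCA_FLOWER_UNSPEC, i.e. fields that have no separate mask
 * attribute in the uAPI.
 */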
2416 static int fl_dump_key_val(struct sk_buff *skb,
2417 			   void *val, int val_type,
2418 			   void *mask, int mask_type, int len)
2419 {
2420 	int err;
2421 
2422 	if (!memchr_inv(mask, 0, len))
2423 		return 0;
2424 	err = nla_put(skb, val_type, len, val);
2425 	if (err)
2426 		return err;
2427 	if (mask_type != TCA_FLOWER_UNSPEC) {
2428 		err = nla_put(skb, mask_type, len, mask);
2429 		if (err)
2430 			return err;
2431 	}
2432 	return 0;
2433 }
2434 
2435 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2436 				  struct fl_flow_key *mask)
2437 {
2438 	if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2439 			    TCA_FLOWER_KEY_PORT_DST_MIN,
2440 			    &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2441 			    sizeof(key->tp_range.tp_min.dst)) ||
2442 	    fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2443 			    TCA_FLOWER_KEY_PORT_DST_MAX,
2444 			    &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2445 			    sizeof(key->tp_range.tp_max.dst)) ||
2446 	    fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2447 			    TCA_FLOWER_KEY_PORT_SRC_MIN,
2448 			    &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2449 			    sizeof(key->tp_range.tp_min.src)) ||
2450 	    fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2451 			    TCA_FLOWER_KEY_PORT_SRC_MAX,
2452 			    &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2453 			    sizeof(key->tp_range.tp_max.src)))
2454 		return -1;
2455 
2456 	return 0;
2457 }
2458 
2459 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2460 				    struct flow_dissector_key_mpls *mpls_key,
2461 				    struct flow_dissector_key_mpls *mpls_mask,
2462 				    u8 lse_index)
2463 {
2464 	struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2465 	struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2466 	int err;
2467 
2468 	err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2469 			 lse_index + 1);
2470 	if (err)
2471 		return err;
2472 
2473 	if (lse_mask->mpls_ttl) {
2474 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2475 				 lse_key->mpls_ttl);
2476 		if (err)
2477 			return err;
2478 	}
2479 	if (lse_mask->mpls_bos) {
2480 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2481 				 lse_key->mpls_bos);
2482 		if (err)
2483 			return err;
2484 	}
2485 	if (lse_mask->mpls_tc) {
2486 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2487 				 lse_key->mpls_tc);
2488 		if (err)
2489 			return err;
2490 	}
2491 	if (lse_mask->mpls_label) {
2492 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2493 				  lse_key->mpls_label);
2494 		if (err)
2495 			return err;
2496 	}
2497 
2498 	return 0;
2499 }
2500 
2501 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2502 				 struct flow_dissector_key_mpls *mpls_key,
2503 				 struct flow_dissector_key_mpls *mpls_mask)
2504 {
2505 	struct nlattr *opts;
2506 	struct nlattr *lse;
2507 	u8 lse_index;
2508 	int err;
2509 
2510 	opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2511 	if (!opts)
2512 		return -EMSGSIZE;
2513 
2514 	for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2515 		if (!(mpls_mask->used_lses & 1 << lse_index))
2516 			continue;
2517 
2518 		lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2519 		if (!lse) {
2520 			err = -EMSGSIZE;
2521 			goto err_opts;
2522 		}
2523 
2524 		err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2525 					       lse_index);
2526 		if (err)
2527 			goto err_opts_lse;
2528 		nla_nest_end(skb, lse);
2529 	}
2530 	nla_nest_end(skb, opts);
2531 
2532 	return 0;
2533 
2534 err_opts_lse:
2535 	nla_nest_cancel(skb, lse);
2536 err_opts:
2537 	nla_nest_cancel(skb, opts);
2538 
2539 	return err;
2540 }
2541 
2542 static int fl_dump_key_mpls(struct sk_buff *skb,
2543 			    struct flow_dissector_key_mpls *mpls_key,
2544 			    struct flow_dissector_key_mpls *mpls_mask)
2545 {
2546 	struct flow_dissector_mpls_lse *lse_mask;
2547 	struct flow_dissector_mpls_lse *lse_key;
2548 	int err;
2549 
2550 	if (!mpls_mask->used_lses)
2551 		return 0;
2552 
2553 	lse_mask = &mpls_mask->ls[0];
2554 	lse_key = &mpls_key->ls[0];
2555 
2556 	/* For backward compatibility, don't use the MPLS nested attributes if
2557 	 * the rule can be expressed using the old attributes.
2558 	 */
2559 	if (mpls_mask->used_lses & ~1 ||
2560 	    (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2561 	     !lse_mask->mpls_tc && !lse_mask->mpls_label))
2562 		return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2563 
2564 	if (lse_mask->mpls_ttl) {
2565 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2566 				 lse_key->mpls_ttl);
2567 		if (err)
2568 			return err;
2569 	}
2570 	if (lse_mask->mpls_tc) {
2571 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2572 				 lse_key->mpls_tc);
2573 		if (err)
2574 			return err;
2575 	}
2576 	if (lse_mask->mpls_label) {
2577 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2578 				  lse_key->mpls_label);
2579 		if (err)
2580 			return err;
2581 	}
2582 	if (lse_mask->mpls_bos) {
2583 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2584 				 lse_key->mpls_bos);
2585 		if (err)
2586 			return err;
2587 	}
2588 	return 0;
2589 }
2590 
2591 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2592 			  struct flow_dissector_key_ip *key,
2593 			  struct flow_dissector_key_ip *mask)
2594 {
2595 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2596 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2597 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2598 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2599 
2600 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2601 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2602 		return -1;
2603 
2604 	return 0;
2605 }
2606 
2607 static int fl_dump_key_vlan(struct sk_buff *skb,
2608 			    int vlan_id_key, int vlan_prio_key,
2609 			    struct flow_dissector_key_vlan *vlan_key,
2610 			    struct flow_dissector_key_vlan *vlan_mask)
2611 {
2612 	int err;
2613 
2614 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2615 		return 0;
2616 	if (vlan_mask->vlan_id) {
2617 		err = nla_put_u16(skb, vlan_id_key,
2618 				  vlan_key->vlan_id);
2619 		if (err)
2620 			return err;
2621 	}
2622 	if (vlan_mask->vlan_priority) {
2623 		err = nla_put_u8(skb, vlan_prio_key,
2624 				 vlan_key->vlan_priority);
2625 		if (err)
2626 			return err;
2627 	}
2628 	return 0;
2629 }
2630 
2631 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2632 			    u32 *flower_key, u32 *flower_mask,
2633 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2634 {
2635 	if (dissector_mask & dissector_flag_bit) {
2636 		*flower_mask |= flower_flag_bit;
2637 		if (dissector_key & dissector_flag_bit)
2638 			*flower_key |= flower_flag_bit;
2639 	}
2640 }
2641 
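/* Translate the internal FLOW_DIS_* control flag bits into the
 * TCA_FLOWER_KEY_FLAGS uAPI bits and dump key and mask as 32-bit
 * big-endian values. Nothing is emitted when no flag bit is masked.
 */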
2642 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2643 {
2644 	u32 key, mask;
2645 	__be32 _key, _mask;
2646 	int err;
2647 
2648 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2649 		return 0;
2650 
2651 	key = 0;
2652 	mask = 0;
2653 
2654 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2655 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2656 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2657 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2658 			FLOW_DIS_FIRST_FRAG);
2659 
2660 	_key = cpu_to_be32(key);
2661 	_mask = cpu_to_be32(mask);
2662 
2663 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2664 	if (err)
2665 		return err;
2666 
2667 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2668 }
2669 
2670 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2671 				  struct flow_dissector_key_enc_opts *enc_opts)
2672 {
2673 	struct geneve_opt *opt;
2674 	struct nlattr *nest;
2675 	int opt_off = 0;
2676 
2677 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2678 	if (!nest)
2679 		goto nla_put_failure;
2680 
2681 	while (enc_opts->len > opt_off) {
2682 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2683 
2684 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2685 				 opt->opt_class))
2686 			goto nla_put_failure;
2687 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2688 			       opt->type))
2689 			goto nla_put_failure;
2690 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2691 			    opt->length * 4, opt->opt_data))
2692 			goto nla_put_failure;
2693 
2694 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2695 	}
2696 	nla_nest_end(skb, nest);
2697 	return 0;
2698 
2699 nla_put_failure:
2700 	nla_nest_cancel(skb, nest);
2701 	return -EMSGSIZE;
2702 }
2703 
2704 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
2705 				 struct flow_dissector_key_enc_opts *enc_opts)
2706 {
2707 	struct vxlan_metadata *md;
2708 	struct nlattr *nest;
2709 
2710 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
2711 	if (!nest)
2712 		goto nla_put_failure;
2713 
2714 	md = (struct vxlan_metadata *)&enc_opts->data[0];
2715 	if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
2716 		goto nla_put_failure;
2717 
2718 	nla_nest_end(skb, nest);
2719 	return 0;
2720 
2721 nla_put_failure:
2722 	nla_nest_cancel(skb, nest);
2723 	return -EMSGSIZE;
2724 }
2725 
2726 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
2727 				  struct flow_dissector_key_enc_opts *enc_opts)
2728 {
2729 	struct erspan_metadata *md;
2730 	struct nlattr *nest;
2731 
2732 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
2733 	if (!nest)
2734 		goto nla_put_failure;
2735 
2736 	md = (struct erspan_metadata *)&enc_opts->data[0];
2737 	if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
2738 		goto nla_put_failure;
2739 
2740 	if (md->version == 1 &&
2741 	    nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
2742 		goto nla_put_failure;
2743 
2744 	if (md->version == 2 &&
2745 	    (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
2746 			md->u.md2.dir) ||
2747 	     nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
2748 			get_hwid(&md->u.md2))))
2749 		goto nla_put_failure;
2750 
2751 	nla_nest_end(skb, nest);
2752 	return 0;
2753 
2754 nla_put_failure:
2755 	nla_nest_cancel(skb, nest);
2756 	return -EMSGSIZE;
2757 }
2758 
2759 static int fl_dump_key_ct(struct sk_buff *skb,
2760 			  struct flow_dissector_key_ct *key,
2761 			  struct flow_dissector_key_ct *mask)
2762 {
2763 	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
2764 	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
2765 			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
2766 			    sizeof(key->ct_state)))
2767 		goto nla_put_failure;
2768 
2769 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
2770 	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
2771 			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
2772 			    sizeof(key->ct_zone)))
2773 		goto nla_put_failure;
2774 
2775 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
2776 	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
2777 			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
2778 			    sizeof(key->ct_mark)))
2779 		goto nla_put_failure;
2780 
2781 	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
2782 	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
2783 			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
2784 			    sizeof(key->ct_labels)))
2785 		goto nla_put_failure;
2786 
2787 	return 0;
2788 
2789 nla_put_failure:
2790 	return -EMSGSIZE;
2791 }
2792 
2793 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2794 			       struct flow_dissector_key_enc_opts *enc_opts)
2795 {
2796 	struct nlattr *nest;
2797 	int err;
2798 
2799 	if (!enc_opts->len)
2800 		return 0;
2801 
2802 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2803 	if (!nest)
2804 		goto nla_put_failure;
2805 
2806 	switch (enc_opts->dst_opt_type) {
2807 	case TUNNEL_GENEVE_OPT:
2808 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2809 		if (err)
2810 			goto nla_put_failure;
2811 		break;
2812 	case TUNNEL_VXLAN_OPT:
2813 		err = fl_dump_key_vxlan_opt(skb, enc_opts);
2814 		if (err)
2815 			goto nla_put_failure;
2816 		break;
2817 	case TUNNEL_ERSPAN_OPT:
2818 		err = fl_dump_key_erspan_opt(skb, enc_opts);
2819 		if (err)
2820 			goto nla_put_failure;
2821 		break;
2822 	default:
2823 		goto nla_put_failure;
2824 	}
2825 	nla_nest_end(skb, nest);
2826 	return 0;
2827 
2828 nla_put_failure:
2829 	nla_nest_cancel(skb, nest);
2830 	return -EMSGSIZE;
2831 }
2832 
2833 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2834 			       struct flow_dissector_key_enc_opts *key_opts,
2835 			       struct flow_dissector_key_enc_opts *msk_opts)
2836 {
2837 	int err;
2838 
2839 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2840 	if (err)
2841 		return err;
2842 
2843 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2844 }
2845 
2846 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2847 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2848 {
2849 	if (mask->meta.ingress_ifindex) {
2850 		struct net_device *dev;
2851 
2852 		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
2853 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2854 			goto nla_put_failure;
2855 	}
2856 
2857 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2858 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2859 			    sizeof(key->eth.dst)) ||
2860 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2861 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2862 			    sizeof(key->eth.src)) ||
2863 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2864 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2865 			    sizeof(key->basic.n_proto)))
2866 		goto nla_put_failure;
2867 
2868 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2869 		goto nla_put_failure;
2870 
2871 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2872 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2873 		goto nla_put_failure;
2874 
2875 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2876 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2877 			     &key->cvlan, &mask->cvlan) ||
2878 	    (mask->cvlan.vlan_tpid &&
2879 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2880 			  key->cvlan.vlan_tpid)))
2881 		goto nla_put_failure;
2882 
2883 	if (mask->basic.n_proto) {
2884 		if (mask->cvlan.vlan_tpid) {
2885 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2886 					 key->basic.n_proto))
2887 				goto nla_put_failure;
2888 		} else if (mask->vlan.vlan_tpid) {
2889 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2890 					 key->basic.n_proto))
2891 				goto nla_put_failure;
2892 		}
2893 	}
2894 
2895 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2896 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2897 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2898 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2899 			    sizeof(key->basic.ip_proto)) ||
2900 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2901 		goto nla_put_failure;
2902 
2903 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2904 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2905 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2906 			     sizeof(key->ipv4.src)) ||
2907 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2908 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2909 			     sizeof(key->ipv4.dst))))
2910 		goto nla_put_failure;
2911 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2912 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2913 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2914 				  sizeof(key->ipv6.src)) ||
2915 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2916 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2917 				  sizeof(key->ipv6.dst))))
2918 		goto nla_put_failure;
2919 
2920 	if (key->basic.ip_proto == IPPROTO_TCP &&
2921 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2922 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2923 			     sizeof(key->tp.src)) ||
2924 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2925 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2926 			     sizeof(key->tp.dst)) ||
2927 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2928 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2929 			     sizeof(key->tcp.flags))))
2930 		goto nla_put_failure;
2931 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2932 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2933 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2934 				  sizeof(key->tp.src)) ||
2935 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2936 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2937 				  sizeof(key->tp.dst))))
2938 		goto nla_put_failure;
2939 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2940 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2941 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2942 				  sizeof(key->tp.src)) ||
2943 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2944 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2945 				  sizeof(key->tp.dst))))
2946 		goto nla_put_failure;
2947 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2948 		 key->basic.ip_proto == IPPROTO_ICMP &&
2949 		 (fl_dump_key_val(skb, &key->icmp.type,
2950 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2951 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2952 				  sizeof(key->icmp.type)) ||
2953 		  fl_dump_key_val(skb, &key->icmp.code,
2954 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2955 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2956 				  sizeof(key->icmp.code))))
2957 		goto nla_put_failure;
2958 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2959 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2960 		 (fl_dump_key_val(skb, &key->icmp.type,
2961 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2962 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2963 				  sizeof(key->icmp.type)) ||
2964 		  fl_dump_key_val(skb, &key->icmp.code,
2965 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2966 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2967 				  sizeof(key->icmp.code))))
2968 		goto nla_put_failure;
2969 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2970 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2971 		 (fl_dump_key_val(skb, &key->arp.sip,
2972 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2973 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2974 				  sizeof(key->arp.sip)) ||
2975 		  fl_dump_key_val(skb, &key->arp.tip,
2976 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2977 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2978 				  sizeof(key->arp.tip)) ||
2979 		  fl_dump_key_val(skb, &key->arp.op,
2980 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2981 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2982 				  sizeof(key->arp.op)) ||
2983 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2984 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2985 				  sizeof(key->arp.sha)) ||
2986 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2987 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2988 				  sizeof(key->arp.tha))))
2989 		goto nla_put_failure;
2990 
2991 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2992 	     key->basic.ip_proto == IPPROTO_UDP ||
2993 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2994 	     fl_dump_key_port_range(skb, key, mask))
2995 		goto nla_put_failure;
2996 
2997 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2998 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2999 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3000 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3001 			    sizeof(key->enc_ipv4.src)) ||
3002 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
3003 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3004 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3005 			     sizeof(key->enc_ipv4.dst))))
3006 		goto nla_put_failure;
3007 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3008 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3009 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3010 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3011 			    sizeof(key->enc_ipv6.src)) ||
3012 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3013 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
3014 				 &mask->enc_ipv6.dst,
3015 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3016 			    sizeof(key->enc_ipv6.dst))))
3017 		goto nla_put_failure;
3018 
3019 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3020 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3021 			    sizeof(key->enc_key_id)) ||
3022 	    fl_dump_key_val(skb, &key->enc_tp.src,
3023 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3024 			    &mask->enc_tp.src,
3025 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3026 			    sizeof(key->enc_tp.src)) ||
3027 	    fl_dump_key_val(skb, &key->enc_tp.dst,
3028 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3029 			    &mask->enc_tp.dst,
3030 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3031 			    sizeof(key->enc_tp.dst)) ||
3032 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3033 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3034 		goto nla_put_failure;
3035 
3036 	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3037 		goto nla_put_failure;
3038 
3039 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3040 		goto nla_put_failure;
3041 
3042 	if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3043 			     &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3044 			     sizeof(key->hash.hash)))
3045 		goto nla_put_failure;
3046 
3047 	return 0;
3048 
3049 nla_put_failure:
3050 	return -EMSGSIZE;
3051 }
3052 
3053 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
3054 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3055 {
3056 	struct cls_fl_filter *f = fh;
3057 	struct nlattr *nest;
3058 	struct fl_flow_key *key, *mask;
3059 	bool skip_hw;
3060 
3061 	if (!f)
3062 		return skb->len;
3063 
3064 	t->tcm_handle = f->handle;
3065 
3066 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3067 	if (!nest)
3068 		goto nla_put_failure;
3069 
3070 	spin_lock(&tp->lock);
3071 
3072 	if (f->res.classid &&
3073 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
3074 		goto nla_put_failure_locked;
3075 
3076 	key = &f->key;
3077 	mask = &f->mask->key;
3078 	skip_hw = tc_skip_hw(f->flags);
3079 
3080 	if (fl_dump_key(skb, net, key, mask))
3081 		goto nla_put_failure_locked;
3082 
3083 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3084 		goto nla_put_failure_locked;
3085 
3086 	spin_unlock(&tp->lock);
3087 
3088 	if (!skip_hw)
3089 		fl_hw_update_stats(tp, f, rtnl_held);
3090 
3091 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
3092 		goto nla_put_failure;
3093 
3094 	if (tcf_exts_dump(skb, &f->exts))
3095 		goto nla_put_failure;
3096 
3097 	nla_nest_end(skb, nest);
3098 
3099 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
3100 		goto nla_put_failure;
3101 
3102 	return skb->len;
3103 
3104 nla_put_failure_locked:
3105 	spin_unlock(&tp->lock);
3106 nla_put_failure:
3107 	nla_nest_cancel(skb, nest);
3108 	return -1;
3109 }
3110 
3111 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
3112 			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
3113 {
3114 	struct cls_fl_filter *f = fh;
3115 	struct nlattr *nest;
3116 	bool skip_hw;
3117 
3118 	if (!f)
3119 		return skb->len;
3120 
3121 	t->tcm_handle = f->handle;
3122 
3123 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3124 	if (!nest)
3125 		goto nla_put_failure;
3126 
3127 	spin_lock(&tp->lock);
3128 
3129 	skip_hw = tc_skip_hw(f->flags);
3130 
3131 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
3132 		goto nla_put_failure_locked;
3133 
3134 	spin_unlock(&tp->lock);
3135 
3136 	if (!skip_hw)
3137 		fl_hw_update_stats(tp, f, rtnl_held);
3138 
3139 	if (tcf_exts_terse_dump(skb, &f->exts))
3140 		goto nla_put_failure;
3141 
3142 	nla_nest_end(skb, nest);
3143 
3144 	return skb->len;
3145 
3146 nla_put_failure_locked:
3147 	spin_unlock(&tp->lock);
3148 nla_put_failure:
3149 	nla_nest_cancel(skb, nest);
3150 	return -1;
3151 }
3152 
3153 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
3154 {
3155 	struct fl_flow_tmplt *tmplt = tmplt_priv;
3156 	struct fl_flow_key *key, *mask;
3157 	struct nlattr *nest;
3158 
3159 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
3160 	if (!nest)
3161 		goto nla_put_failure;
3162 
3163 	key = &tmplt->dummy_key;
3164 	mask = &tmplt->mask;
3165 
3166 	if (fl_dump_key(skb, net, key, mask))
3167 		goto nla_put_failure;
3168 
3169 	nla_nest_end(skb, nest);
3170 
3171 	return skb->len;
3172 
3173 nla_put_failure:
3174 	nla_nest_cancel(skb, nest);
3175 	return -EMSGSIZE;
3176 }
3177 
3178 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
3179 			  unsigned long base)
3180 {
3181 	struct cls_fl_filter *f = fh;
3182 
3183 	if (f && f->res.classid == classid) {
3184 		if (cl)
3185 			__tcf_bind_filter(q, &f->res, base);
3186 		else
3187 			__tcf_unbind_filter(q, &f->res);
3188 	}
3189 }
3190 
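/* Registered as the ->delete_empty() op: report whether the classifier
 * instance has no filters left. tp->deleting is set under tp->lock when
 * the handle IDR is empty, which also makes a concurrent fl_change()
 * back off with -EAGAIN.
 */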
3191 static bool fl_delete_empty(struct tcf_proto *tp)
3192 {
3193 	struct cls_fl_head *head = fl_head_dereference(tp);
3194 
3195 	spin_lock(&tp->lock);
3196 	tp->deleting = idr_is_empty(&head->handle_idr);
3197 	spin_unlock(&tp->lock);
3198 
3199 	return tp->deleting;
3200 }
3201 
3202 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
3203 	.kind		= "flower",
3204 	.classify	= fl_classify,
3205 	.init		= fl_init,
3206 	.destroy	= fl_destroy,
3207 	.get		= fl_get,
3208 	.put		= fl_put,
3209 	.change		= fl_change,
3210 	.delete		= fl_delete,
3211 	.delete_empty	= fl_delete_empty,
3212 	.walk		= fl_walk,
3213 	.reoffload	= fl_reoffload,
3214 	.hw_add		= fl_hw_add,
3215 	.hw_del		= fl_hw_del,
3216 	.dump		= fl_dump,
3217 	.terse_dump	= fl_terse_dump,
3218 	.bind_class	= fl_bind_class,
3219 	.tmplt_create	= fl_tmplt_create,
3220 	.tmplt_destroy	= fl_tmplt_destroy,
3221 	.tmplt_dump	= fl_tmplt_dump,
3222 	.owner		= THIS_MODULE,
3223 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
3224 };
3225 
3226 static int __init cls_fl_init(void)
3227 {
3228 	return register_tcf_proto_ops(&cls_fl_ops);
3229 }
3230 
3231 static void __exit cls_fl_exit(void)
3232 {
3233 	unregister_tcf_proto_ops(&cls_fl_ops);
3234 }
3235 
3236 module_init(cls_fl_init);
3237 module_exit(cls_fl_exit);
3238 
3239 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
3240 MODULE_DESCRIPTION("Flower classifier");
3241 MODULE_LICENSE("GPL v2");
3242