xref: /openbmc/linux/net/sched/cls_flower.c (revision 4a075bd4)
1 /*
2  * net/sched/cls_flower.c		Flower classifier
3  *
4  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/rhashtable.h>
16 #include <linux/workqueue.h>
17 #include <linux/refcount.h>
18 
19 #include <linux/if_ether.h>
20 #include <linux/in6.h>
21 #include <linux/ip.h>
22 #include <linux/mpls.h>
23 
24 #include <net/sch_generic.h>
25 #include <net/pkt_cls.h>
26 #include <net/ip.h>
27 #include <net/flow_dissector.h>
28 #include <net/geneve.h>
29 
30 #include <net/dst.h>
31 #include <net/dst_metadata.h>
32 
33 struct fl_flow_key {
34 	int	indev_ifindex;
35 	struct flow_dissector_key_control control;
36 	struct flow_dissector_key_control enc_control;
37 	struct flow_dissector_key_basic basic;
38 	struct flow_dissector_key_eth_addrs eth;
39 	struct flow_dissector_key_vlan vlan;
40 	struct flow_dissector_key_vlan cvlan;
41 	union {
42 		struct flow_dissector_key_ipv4_addrs ipv4;
43 		struct flow_dissector_key_ipv6_addrs ipv6;
44 	};
45 	struct flow_dissector_key_ports tp;
46 	struct flow_dissector_key_icmp icmp;
47 	struct flow_dissector_key_arp arp;
48 	struct flow_dissector_key_keyid enc_key_id;
49 	union {
50 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 	};
53 	struct flow_dissector_key_ports enc_tp;
54 	struct flow_dissector_key_mpls mpls;
55 	struct flow_dissector_key_tcp tcp;
56 	struct flow_dissector_key_ip ip;
57 	struct flow_dissector_key_ip enc_ip;
58 	struct flow_dissector_key_enc_opts enc_opts;
59 	struct flow_dissector_key_ports tp_min;
60 	struct flow_dissector_key_ports tp_max;
61 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
62 
63 struct fl_flow_mask_range {
64 	unsigned short int start;
65 	unsigned short int end;
66 };
67 
68 struct fl_flow_mask {
69 	struct fl_flow_key key;
70 	struct fl_flow_mask_range range;
71 	u32 flags;
72 	struct rhash_head ht_node;
73 	struct rhashtable ht;
74 	struct rhashtable_params filter_ht_params;
75 	struct flow_dissector dissector;
76 	struct list_head filters;
77 	struct rcu_work rwork;
78 	struct list_head list;
79 	refcount_t refcnt;
80 };
81 
82 struct fl_flow_tmplt {
83 	struct fl_flow_key dummy_key;
84 	struct fl_flow_key mask;
85 	struct flow_dissector dissector;
86 	struct tcf_chain *chain;
87 };
88 
89 struct cls_fl_head {
90 	struct rhashtable ht;
91 	spinlock_t masks_lock; /* Protect masks list */
92 	struct list_head masks;
93 	struct list_head hw_filters;
94 	struct rcu_work rwork;
95 	struct idr handle_idr;
96 };
97 
98 struct cls_fl_filter {
99 	struct fl_flow_mask *mask;
100 	struct rhash_head ht_node;
101 	struct fl_flow_key mkey;
102 	struct tcf_exts exts;
103 	struct tcf_result res;
104 	struct fl_flow_key key;
105 	struct list_head list;
106 	struct list_head hw_list;
107 	u32 handle;
108 	u32 flags;
109 	u32 in_hw_count;
110 	struct rcu_work rwork;
111 	struct net_device *hw_dev;
112 	/* Flower classifier is unlocked, which means that its reference counter
113 	 * can be changed concurrently without any kind of external
114 	 * synchronization. Use atomic reference counter to be concurrency-safe.
115 	 */
116 	refcount_t refcnt;
117 	bool deleted;
118 };
119 
120 static const struct rhashtable_params mask_ht_params = {
121 	.key_offset = offsetof(struct fl_flow_mask, key),
122 	.key_len = sizeof(struct fl_flow_key),
123 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
124 	.automatic_shrinking = true,
125 };
126 
127 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
128 {
129 	return mask->range.end - mask->range.start;
130 }
131 
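/* Find the first and last non-zero bytes of the mask and round them out to
 * long boundaries, so that masked key comparison and hashing only need to
 * cover the byte range that is actually masked.
 */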
132 static void fl_mask_update_range(struct fl_flow_mask *mask)
133 {
134 	const u8 *bytes = (const u8 *) &mask->key;
135 	size_t size = sizeof(mask->key);
136 	size_t i, first = 0, last;
137 
138 	for (i = 0; i < size; i++) {
139 		if (bytes[i]) {
140 			first = i;
141 			break;
142 		}
143 	}
144 	last = first;
145 	for (i = size - 1; i != first; i--) {
146 		if (bytes[i]) {
147 			last = i;
148 			break;
149 		}
150 	}
151 	mask->range.start = rounddown(first, sizeof(long));
152 	mask->range.end = roundup(last + 1, sizeof(long));
153 }
154 
155 static void *fl_key_get_start(struct fl_flow_key *key,
156 			      const struct fl_flow_mask *mask)
157 {
158 	return (u8 *) key + mask->range.start;
159 }
160 
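/* AND the relevant range of @key with the mask into @mkey, one long at a
 * time, using the byte range computed by fl_mask_update_range().
 */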
161 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
162 			      struct fl_flow_mask *mask)
163 {
164 	const long *lkey = fl_key_get_start(key, mask);
165 	const long *lmask = fl_key_get_start(&mask->key, mask);
166 	long *lmkey = fl_key_get_start(mkey, mask);
167 	int i;
168 
169 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
170 		*lmkey++ = *lkey++ & *lmask++;
171 }
172 
173 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
174 			       struct fl_flow_mask *mask)
175 {
176 	const long *lmask = fl_key_get_start(&mask->key, mask);
177 	const long *ltmplt;
178 	int i;
179 
180 	if (!tmplt)
181 		return true;
182 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
183 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
184 		if (~*ltmplt++ & *lmask++)
185 			return false;
186 	}
187 	return true;
188 }
189 
190 static void fl_clear_masked_range(struct fl_flow_key *key,
191 				  struct fl_flow_mask *mask)
192 {
193 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
194 }
195 
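/* Port-range matching: verify that the packet's destination port falls
 * within the filter's [min, max] range and, if it does, copy the filter's
 * masked min/max values into @mkey so the hash table lookup can match.
 */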
196 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
197 				  struct fl_flow_key *key,
198 				  struct fl_flow_key *mkey)
199 {
200 	u16 min_mask, max_mask, min_val, max_val;
201 
202 	min_mask = ntohs(filter->mask->key.tp_min.dst);
203 	max_mask = ntohs(filter->mask->key.tp_max.dst);
204 	min_val = ntohs(filter->key.tp_min.dst);
205 	max_val = ntohs(filter->key.tp_max.dst);
206 
207 	if (min_mask && max_mask) {
208 		if (ntohs(key->tp.dst) < min_val ||
209 		    ntohs(key->tp.dst) > max_val)
210 			return false;
211 
212 		/* skb does not have min and max values */
213 		mkey->tp_min.dst = filter->mkey.tp_min.dst;
214 		mkey->tp_max.dst = filter->mkey.tp_max.dst;
215 	}
216 	return true;
217 }
218 
219 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
220 				  struct fl_flow_key *key,
221 				  struct fl_flow_key *mkey)
222 {
223 	u16 min_mask, max_mask, min_val, max_val;
224 
225 	min_mask = ntohs(filter->mask->key.tp_min.src);
226 	max_mask = ntohs(filter->mask->key.tp_max.src);
227 	min_val = ntohs(filter->key.tp_min.src);
228 	max_val = ntohs(filter->key.tp_max.src);
229 
230 	if (min_mask && max_mask) {
231 		if (ntohs(key->tp.src) < min_val ||
232 		    ntohs(key->tp.src) > max_val)
233 			return false;
234 
235 		/* skb does not have min and max values */
236 		mkey->tp_min.src = filter->mkey.tp_min.src;
237 		mkey->tp_max.src = filter->mkey.tp_max.src;
238 	}
239 	return true;
240 }
241 
242 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
243 					 struct fl_flow_key *mkey)
244 {
245 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
246 				      mask->filter_ht_params);
247 }
248 
249 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
250 					     struct fl_flow_key *mkey,
251 					     struct fl_flow_key *key)
252 {
253 	struct cls_fl_filter *filter, *f;
254 
255 	list_for_each_entry_rcu(filter, &mask->filters, list) {
256 		if (!fl_range_port_dst_cmp(filter, key, mkey))
257 			continue;
258 
259 		if (!fl_range_port_src_cmp(filter, key, mkey))
260 			continue;
261 
262 		f = __fl_lookup(mask, mkey);
263 		if (f)
264 			return f;
265 	}
266 	return NULL;
267 }
268 
269 static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
270 				       struct fl_flow_key *mkey,
271 				       struct fl_flow_key *key)
272 {
273 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
274 		return fl_lookup_range(mask, mkey, key);
275 
276 	return __fl_lookup(mask, mkey);
277 }
278 
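/* Main classification path: for each mask in use, dissect the skb into
 * skb_key, apply the mask and look the result up in that mask's filter hash
 * table. The first match that is not skip_sw has its actions executed.
 */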
279 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
280 		       struct tcf_result *res)
281 {
282 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
283 	struct cls_fl_filter *f;
284 	struct fl_flow_mask *mask;
285 	struct fl_flow_key skb_key;
286 	struct fl_flow_key skb_mkey;
287 
288 	list_for_each_entry_rcu(mask, &head->masks, list) {
289 		fl_clear_masked_range(&skb_key, mask);
290 
291 		skb_key.indev_ifindex = skb->skb_iif;
292 		/* skb_flow_dissect() does not set n_proto in case of an
293 		 * unknown protocol, so do it here instead.
294 		 */
295 		skb_key.basic.n_proto = skb->protocol;
296 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
297 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
298 
299 		fl_set_masked_key(&skb_mkey, &skb_key, mask);
300 
301 		f = fl_lookup(mask, &skb_mkey, &skb_key);
302 		if (f && !tc_skip_sw(f->flags)) {
303 			*res = f->res;
304 			return tcf_exts_exec(skb, &f->exts, res);
305 		}
306 	}
307 	return -1;
308 }
309 
310 static int fl_init(struct tcf_proto *tp)
311 {
312 	struct cls_fl_head *head;
313 
314 	head = kzalloc(sizeof(*head), GFP_KERNEL);
315 	if (!head)
316 		return -ENOBUFS;
317 
318 	spin_lock_init(&head->masks_lock);
319 	INIT_LIST_HEAD_RCU(&head->masks);
320 	INIT_LIST_HEAD(&head->hw_filters);
321 	rcu_assign_pointer(tp->root, head);
322 	idr_init(&head->handle_idr);
323 
324 	return rhashtable_init(&head->ht, &mask_ht_params);
325 }
326 
327 static void fl_mask_free(struct fl_flow_mask *mask)
328 {
329 	WARN_ON(!list_empty(&mask->filters));
330 	rhashtable_destroy(&mask->ht);
331 	kfree(mask);
332 }
333 
334 static void fl_mask_free_work(struct work_struct *work)
335 {
336 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
337 						 struct fl_flow_mask, rwork);
338 
339 	fl_mask_free(mask);
340 }
341 
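/* Drop a reference on the mask. On the final put, unlink it from the masks
 * hash table and list and free it via the RCU workqueue. Returns true if
 * the mask was released.
 */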
342 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
343 {
344 	if (!refcount_dec_and_test(&mask->refcnt))
345 		return false;
346 
347 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
348 
349 	spin_lock(&head->masks_lock);
350 	list_del_rcu(&mask->list);
351 	spin_unlock(&head->masks_lock);
352 
353 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
354 
355 	return true;
356 }
357 
358 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
359 {
360 	/* Flower classifier only changes the root pointer during init and
361 	 * destroy. Users must obtain a reference to the tcf_proto instance
362 	 * before calling its API, so the tp->root pointer is protected from a
363 	 * concurrent call to fl_destroy() by reference counting.
364 	 */
365 	return rcu_dereference_raw(tp->root);
366 }
367 
368 static void __fl_destroy_filter(struct cls_fl_filter *f)
369 {
370 	tcf_exts_destroy(&f->exts);
371 	tcf_exts_put_net(&f->exts);
372 	kfree(f);
373 }
374 
375 static void fl_destroy_filter_work(struct work_struct *work)
376 {
377 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
378 					struct cls_fl_filter, rwork);
379 
380 	__fl_destroy_filter(f);
381 }
382 
383 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
384 				 bool rtnl_held, struct netlink_ext_ack *extack)
385 {
386 	struct tc_cls_flower_offload cls_flower = {};
387 	struct tcf_block *block = tp->chain->block;
388 
389 	if (!rtnl_held)
390 		rtnl_lock();
391 
392 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
393 	cls_flower.command = TC_CLSFLOWER_DESTROY;
394 	cls_flower.cookie = (unsigned long) f;
395 
396 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
397 	spin_lock(&tp->lock);
398 	list_del_init(&f->hw_list);
399 	tcf_block_offload_dec(block, &f->flags);
400 	spin_unlock(&tp->lock);
401 
402 	if (!rtnl_held)
403 		rtnl_unlock();
404 }
405 
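/* Offload the filter to hardware. skip_sw filters must be accepted by at
 * least one device, otherwise an error is returned; for all other filters
 * hardware failures are ignored and the filter stays software-only.
 */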
406 static int fl_hw_replace_filter(struct tcf_proto *tp,
407 				struct cls_fl_filter *f, bool rtnl_held,
408 				struct netlink_ext_ack *extack)
409 {
410 	struct cls_fl_head *head = fl_head_dereference(tp);
411 	struct tc_cls_flower_offload cls_flower = {};
412 	struct tcf_block *block = tp->chain->block;
413 	bool skip_sw = tc_skip_sw(f->flags);
414 	int err = 0;
415 
416 	if (!rtnl_held)
417 		rtnl_lock();
418 
419 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
420 	if (!cls_flower.rule) {
421 		err = -ENOMEM;
422 		goto errout;
423 	}
424 
425 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
426 	cls_flower.command = TC_CLSFLOWER_REPLACE;
427 	cls_flower.cookie = (unsigned long) f;
428 	cls_flower.rule->match.dissector = &f->mask->dissector;
429 	cls_flower.rule->match.mask = &f->mask->key;
430 	cls_flower.rule->match.key = &f->mkey;
431 	cls_flower.classid = f->res.classid;
432 
433 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
434 	if (err) {
435 		kfree(cls_flower.rule);
436 		if (skip_sw)
437 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
438 		else
439 			err = 0;
440 		goto errout;
441 	}
442 
443 	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
444 	kfree(cls_flower.rule);
445 
446 	if (err < 0) {
447 		fl_hw_destroy_filter(tp, f, true, NULL);
448 		goto errout;
449 	} else if (err > 0) {
450 		f->in_hw_count = err;
451 		err = 0;
452 		spin_lock(&tp->lock);
453 		tcf_block_offload_inc(block, &f->flags);
454 		spin_unlock(&tp->lock);
455 	}
456 
457 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
458 		err = -EINVAL;
459 		goto errout;
460 	}
461 
462 	spin_lock(&tp->lock);
463 	list_add(&f->hw_list, &head->hw_filters);
464 	spin_unlock(&tp->lock);
465 errout:
466 	if (!rtnl_held)
467 		rtnl_unlock();
468 
469 	return err;
470 }
471 
472 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
473 			       bool rtnl_held)
474 {
475 	struct tc_cls_flower_offload cls_flower = {};
476 	struct tcf_block *block = tp->chain->block;
477 
478 	if (!rtnl_held)
479 		rtnl_lock();
480 
481 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
482 	cls_flower.command = TC_CLSFLOWER_STATS;
483 	cls_flower.cookie = (unsigned long) f;
484 	cls_flower.classid = f->res.classid;
485 
486 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
487 
488 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
489 			      cls_flower.stats.pkts,
490 			      cls_flower.stats.lastused);
491 
492 	if (!rtnl_held)
493 		rtnl_unlock();
494 }
495 
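/* Drop a reference on the filter. The final put frees it, via the RCU
 * workqueue when the netns can still be referenced, or synchronously
 * otherwise.
 */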
496 static void __fl_put(struct cls_fl_filter *f)
497 {
498 	if (!refcount_dec_and_test(&f->refcnt))
499 		return;
500 
501 	if (tcf_exts_get_net(&f->exts))
502 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
503 	else
504 		__fl_destroy_filter(f);
505 }
506 
507 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
508 {
509 	struct cls_fl_filter *f;
510 
511 	rcu_read_lock();
512 	f = idr_find(&head->handle_idr, handle);
513 	if (f && !refcount_inc_not_zero(&f->refcnt))
514 		f = NULL;
515 	rcu_read_unlock();
516 
517 	return f;
518 }
519 
520 static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
521 						unsigned long *handle)
522 {
523 	struct cls_fl_head *head = fl_head_dereference(tp);
524 	struct cls_fl_filter *f;
525 
526 	rcu_read_lock();
527 	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
528 		/* don't return filters that are being deleted */
529 		if (refcount_inc_not_zero(&f->refcnt))
530 			break;
531 		++(*handle);
532 	}
533 	rcu_read_unlock();
534 
535 	return f;
536 }
537 
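/* Unlink the filter from its mask's hash table, the handle IDR and the
 * filter list under tp->lock, then release the mask, the hardware state and
 * the filter's own reference. *last is set if the mask was freed as well.
 */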
538 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
539 		       bool *last, bool rtnl_held,
540 		       struct netlink_ext_ack *extack)
541 {
542 	struct cls_fl_head *head = fl_head_dereference(tp);
543 
544 	*last = false;
545 
546 	spin_lock(&tp->lock);
547 	if (f->deleted) {
548 		spin_unlock(&tp->lock);
549 		return -ENOENT;
550 	}
551 
552 	f->deleted = true;
553 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
554 			       f->mask->filter_ht_params);
555 	idr_remove(&head->handle_idr, f->handle);
556 	list_del_rcu(&f->list);
557 	spin_unlock(&tp->lock);
558 
559 	*last = fl_mask_put(head, f->mask);
560 	if (!tc_skip_hw(f->flags))
561 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
562 	tcf_unbind_filter(tp, &f->res);
563 	__fl_put(f);
564 
565 	return 0;
566 }
567 
568 static void fl_destroy_sleepable(struct work_struct *work)
569 {
570 	struct cls_fl_head *head = container_of(to_rcu_work(work),
571 						struct cls_fl_head,
572 						rwork);
573 
574 	rhashtable_destroy(&head->ht);
575 	kfree(head);
576 	module_put(THIS_MODULE);
577 }
578 
579 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
580 		       struct netlink_ext_ack *extack)
581 {
582 	struct cls_fl_head *head = fl_head_dereference(tp);
583 	struct fl_flow_mask *mask, *next_mask;
584 	struct cls_fl_filter *f, *next;
585 	bool last;
586 
587 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
588 		list_for_each_entry_safe(f, next, &mask->filters, list) {
589 			__fl_delete(tp, f, &last, rtnl_held, extack);
590 			if (last)
591 				break;
592 		}
593 	}
594 	idr_destroy(&head->handle_idr);
595 
596 	__module_get(THIS_MODULE);
597 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
598 }
599 
600 static void fl_put(struct tcf_proto *tp, void *arg)
601 {
602 	struct cls_fl_filter *f = arg;
603 
604 	__fl_put(f);
605 }
606 
607 static void *fl_get(struct tcf_proto *tp, u32 handle)
608 {
609 	struct cls_fl_head *head = fl_head_dereference(tp);
610 
611 	return __fl_get(head, handle);
612 }
613 
614 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
615 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
616 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
617 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
618 					    .len = IFNAMSIZ },
619 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
620 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
621 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
622 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
623 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
624 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
625 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
626 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
627 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
628 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
629 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
630 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
631 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
632 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
633 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
635 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
636 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
637 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
638 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
639 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
640 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
641 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
642 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
643 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
644 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
645 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
646 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
647 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
648 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
649 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
656 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
657 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
658 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
659 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
660 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
661 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
662 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
663 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
666 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
667 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
668 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
669 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
671 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
672 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
673 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
674 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
675 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
678 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
679 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
680 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
681 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
684 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
685 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
687 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
688 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
690 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
692 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
693 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
694 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
695 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
696 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
697 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
698 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
699 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
700 };
701 
702 static const struct nla_policy
703 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
704 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
705 };
706 
707 static const struct nla_policy
708 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
709 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
710 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
711 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
712 						       .len = 128 },
713 };
714 
715 static void fl_set_key_val(struct nlattr **tb,
716 			   void *val, int val_type,
717 			   void *mask, int mask_type, int len)
718 {
719 	if (!tb[val_type])
720 		return;
721 	memcpy(val, nla_data(tb[val_type]), len);
722 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
723 		memset(mask, 0xff, len);
724 	else
725 		memcpy(mask, nla_data(tb[mask_type]), len);
726 }
727 
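/* Parse the optional min/max destination and source port attributes used
 * for port-range matching. A range whose maximum is not strictly greater
 * than its minimum is rejected.
 */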
728 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
729 				 struct fl_flow_key *mask)
730 {
731 	fl_set_key_val(tb, &key->tp_min.dst,
732 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
733 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
734 	fl_set_key_val(tb, &key->tp_max.dst,
735 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
736 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
737 	fl_set_key_val(tb, &key->tp_min.src,
738 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
739 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
740 	fl_set_key_val(tb, &key->tp_max.src,
741 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
742 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
743 
744 	if ((mask->tp_min.dst && mask->tp_max.dst &&
745 	     ntohs(key->tp_max.dst) <= ntohs(key->tp_min.dst)) ||
746 	     (mask->tp_min.src && mask->tp_max.src &&
747 	      ntohs(key->tp_max.src) <= ntohs(key->tp_min.src)))
748 		return -EINVAL;
749 
750 	return 0;
751 }
752 
753 static int fl_set_key_mpls(struct nlattr **tb,
754 			   struct flow_dissector_key_mpls *key_val,
755 			   struct flow_dissector_key_mpls *key_mask)
756 {
757 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
758 		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
759 		key_mask->mpls_ttl = MPLS_TTL_MASK;
760 	}
761 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
762 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
763 
764 		if (bos & ~MPLS_BOS_MASK)
765 			return -EINVAL;
766 		key_val->mpls_bos = bos;
767 		key_mask->mpls_bos = MPLS_BOS_MASK;
768 	}
769 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
770 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
771 
772 		if (tc & ~MPLS_TC_MASK)
773 			return -EINVAL;
774 		key_val->mpls_tc = tc;
775 		key_mask->mpls_tc = MPLS_TC_MASK;
776 	}
777 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
778 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
779 
780 		if (label & ~MPLS_LABEL_MASK)
781 			return -EINVAL;
782 		key_val->mpls_label = label;
783 		key_mask->mpls_label = MPLS_LABEL_MASK;
784 	}
785 	return 0;
786 }
787 
788 static void fl_set_key_vlan(struct nlattr **tb,
789 			    __be16 ethertype,
790 			    int vlan_id_key, int vlan_prio_key,
791 			    struct flow_dissector_key_vlan *key_val,
792 			    struct flow_dissector_key_vlan *key_mask)
793 {
794 #define VLAN_PRIORITY_MASK	0x7
795 
796 	if (tb[vlan_id_key]) {
797 		key_val->vlan_id =
798 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
799 		key_mask->vlan_id = VLAN_VID_MASK;
800 	}
801 	if (tb[vlan_prio_key]) {
802 		key_val->vlan_priority =
803 			nla_get_u8(tb[vlan_prio_key]) &
804 			VLAN_PRIORITY_MASK;
805 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
806 	}
807 	key_val->vlan_tpid = ethertype;
808 	key_mask->vlan_tpid = cpu_to_be16(~0);
809 }
810 
811 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
812 			    u32 *dissector_key, u32 *dissector_mask,
813 			    u32 flower_flag_bit, u32 dissector_flag_bit)
814 {
815 	if (flower_mask & flower_flag_bit) {
816 		*dissector_mask |= dissector_flag_bit;
817 		if (flower_key & flower_flag_bit)
818 			*dissector_key |= dissector_flag_bit;
819 	}
820 }
821 
822 static int fl_set_key_flags(struct nlattr **tb,
823 			    u32 *flags_key, u32 *flags_mask)
824 {
825 	u32 key, mask;
826 
827 	/* mask is mandatory for flags */
828 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
829 		return -EINVAL;
830 
831 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
832 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
833 
834 	*flags_key  = 0;
835 	*flags_mask = 0;
836 
837 	fl_set_key_flag(key, mask, flags_key, flags_mask,
838 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
839 	fl_set_key_flag(key, mask, flags_key, flags_mask,
840 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
841 			FLOW_DIS_FIRST_FRAG);
842 
843 	return 0;
844 }
845 
846 static void fl_set_key_ip(struct nlattr **tb, bool encap,
847 			  struct flow_dissector_key_ip *key,
848 			  struct flow_dissector_key_ip *mask)
849 {
850 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
851 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
852 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
853 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
854 
855 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
856 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
857 }
858 
859 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
860 			     int depth, int option_len,
861 			     struct netlink_ext_ack *extack)
862 {
863 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
864 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
865 	struct geneve_opt *opt;
866 	int err, data_len = 0;
867 
868 	if (option_len > sizeof(struct geneve_opt))
869 		data_len = option_len - sizeof(struct geneve_opt);
870 
871 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
872 	memset(opt, 0xff, option_len);
873 	opt->length = data_len / 4;
874 	opt->r1 = 0;
875 	opt->r2 = 0;
876 	opt->r3 = 0;
877 
878 	/* If no mask has been provided we assume an exact match. */
879 	if (!depth)
880 		return sizeof(struct geneve_opt) + data_len;
881 
882 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
883 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
884 		return -EINVAL;
885 	}
886 
887 	err = nla_parse_nested_deprecated(tb,
888 					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
889 					  nla, geneve_opt_policy, extack);
890 	if (err < 0)
891 		return err;
892 
893 	/* We are not allowed to omit any of CLASS, TYPE or DATA
894 	 * fields from the key.
895 	 */
896 	if (!option_len &&
897 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
898 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
899 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
900 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
901 		return -EINVAL;
902 	}
903 
904 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
905 	 * for the mask.
906 	 */
907 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
908 		int new_len = key->enc_opts.len;
909 
910 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
911 		data_len = nla_len(data);
912 		if (data_len < 4) {
913 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
914 			return -ERANGE;
915 		}
916 		if (data_len % 4) {
917 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
918 			return -ERANGE;
919 		}
920 
921 		new_len += sizeof(struct geneve_opt) + data_len;
922 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
923 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
924 			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
925 			return -ERANGE;
926 		}
927 		opt->length = data_len / 4;
928 		memcpy(opt->opt_data, nla_data(data), data_len);
929 	}
930 
931 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
932 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
933 		opt->opt_class = nla_get_be16(class);
934 	}
935 
936 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
937 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
938 		opt->type = nla_get_u8(type);
939 	}
940 
941 	return sizeof(struct geneve_opt) + data_len;
942 }
943 
944 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
945 			  struct fl_flow_key *mask,
946 			  struct netlink_ext_ack *extack)
947 {
948 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
949 	int err, option_len, key_depth, msk_depth = 0;
950 
951 	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
952 					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
953 					     enc_opts_policy, extack);
954 	if (err)
955 		return err;
956 
957 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
958 
959 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
960 		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
961 						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
962 						     enc_opts_policy, extack);
963 		if (err)
964 			return err;
965 
966 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
967 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
968 	}
969 
970 	nla_for_each_attr(nla_opt_key, nla_enc_key,
971 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
972 		switch (nla_type(nla_opt_key)) {
973 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
974 			option_len = 0;
975 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
976 			option_len = fl_set_geneve_opt(nla_opt_key, key,
977 						       key_depth, option_len,
978 						       extack);
979 			if (option_len < 0)
980 				return option_len;
981 
982 			key->enc_opts.len += option_len;
983 			/* At the same time, parse through the mask in order to
984 			 * verify that the key and mask attribute lengths match.
985 			 */
986 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
987 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
988 						       msk_depth, option_len,
989 						       extack);
990 			if (option_len < 0)
991 				return option_len;
992 
993 			mask->enc_opts.len += option_len;
994 			if (key->enc_opts.len != mask->enc_opts.len) {
995 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
996 				return -EINVAL;
997 			}
998 
999 			if (msk_depth)
1000 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
1001 			break;
1002 		default:
1003 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
1004 			return -EINVAL;
1005 		}
1006 	}
1007 
1008 	return 0;
1009 }
1010 
1011 static int fl_set_key(struct net *net, struct nlattr **tb,
1012 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1013 		      struct netlink_ext_ack *extack)
1014 {
1015 	__be16 ethertype;
1016 	int ret = 0;
1017 #ifdef CONFIG_NET_CLS_IND
1018 	if (tb[TCA_FLOWER_INDEV]) {
1019 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1020 		if (err < 0)
1021 			return err;
1022 		key->indev_ifindex = err;
1023 		mask->indev_ifindex = 0xffffffff;
1024 	}
1025 #endif
1026 
1027 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1028 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1029 		       sizeof(key->eth.dst));
1030 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1031 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1032 		       sizeof(key->eth.src));
1033 
1034 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1035 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1036 
1037 		if (eth_type_vlan(ethertype)) {
1038 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1039 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1040 					&mask->vlan);
1041 
1042 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1043 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1044 				if (eth_type_vlan(ethertype)) {
1045 					fl_set_key_vlan(tb, ethertype,
1046 							TCA_FLOWER_KEY_CVLAN_ID,
1047 							TCA_FLOWER_KEY_CVLAN_PRIO,
1048 							&key->cvlan, &mask->cvlan);
1049 					fl_set_key_val(tb, &key->basic.n_proto,
1050 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1051 						       &mask->basic.n_proto,
1052 						       TCA_FLOWER_UNSPEC,
1053 						       sizeof(key->basic.n_proto));
1054 				} else {
1055 					key->basic.n_proto = ethertype;
1056 					mask->basic.n_proto = cpu_to_be16(~0);
1057 				}
1058 			}
1059 		} else {
1060 			key->basic.n_proto = ethertype;
1061 			mask->basic.n_proto = cpu_to_be16(~0);
1062 		}
1063 	}
1064 
1065 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1066 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1067 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1068 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1069 			       sizeof(key->basic.ip_proto));
1070 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1071 	}
1072 
1073 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1074 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1075 		mask->control.addr_type = ~0;
1076 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1077 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1078 			       sizeof(key->ipv4.src));
1079 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1080 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1081 			       sizeof(key->ipv4.dst));
1082 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1083 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1084 		mask->control.addr_type = ~0;
1085 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1086 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1087 			       sizeof(key->ipv6.src));
1088 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1089 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1090 			       sizeof(key->ipv6.dst));
1091 	}
1092 
1093 	if (key->basic.ip_proto == IPPROTO_TCP) {
1094 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1095 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1096 			       sizeof(key->tp.src));
1097 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1098 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1099 			       sizeof(key->tp.dst));
1100 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1101 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1102 			       sizeof(key->tcp.flags));
1103 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1104 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1105 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1106 			       sizeof(key->tp.src));
1107 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1108 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1109 			       sizeof(key->tp.dst));
1110 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1111 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1112 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1113 			       sizeof(key->tp.src));
1114 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1115 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1116 			       sizeof(key->tp.dst));
1117 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1118 		   key->basic.ip_proto == IPPROTO_ICMP) {
1119 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1120 			       &mask->icmp.type,
1121 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1122 			       sizeof(key->icmp.type));
1123 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1124 			       &mask->icmp.code,
1125 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1126 			       sizeof(key->icmp.code));
1127 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1128 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1129 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1130 			       &mask->icmp.type,
1131 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1132 			       sizeof(key->icmp.type));
1133 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1134 			       &mask->icmp.code,
1135 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1136 			       sizeof(key->icmp.code));
1137 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1138 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1139 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1140 		if (ret)
1141 			return ret;
1142 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1143 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1144 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1145 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1146 			       sizeof(key->arp.sip));
1147 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1148 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1149 			       sizeof(key->arp.tip));
1150 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1151 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1152 			       sizeof(key->arp.op));
1153 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1154 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1155 			       sizeof(key->arp.sha));
1156 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1157 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1158 			       sizeof(key->arp.tha));
1159 	}
1160 
1161 	if (key->basic.ip_proto == IPPROTO_TCP ||
1162 	    key->basic.ip_proto == IPPROTO_UDP ||
1163 	    key->basic.ip_proto == IPPROTO_SCTP) {
1164 		ret = fl_set_key_port_range(tb, key, mask);
1165 		if (ret)
1166 			return ret;
1167 	}
1168 
1169 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1170 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1171 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1172 		mask->enc_control.addr_type = ~0;
1173 		fl_set_key_val(tb, &key->enc_ipv4.src,
1174 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1175 			       &mask->enc_ipv4.src,
1176 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1177 			       sizeof(key->enc_ipv4.src));
1178 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1179 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1180 			       &mask->enc_ipv4.dst,
1181 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1182 			       sizeof(key->enc_ipv4.dst));
1183 	}
1184 
1185 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1186 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1187 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1188 		mask->enc_control.addr_type = ~0;
1189 		fl_set_key_val(tb, &key->enc_ipv6.src,
1190 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1191 			       &mask->enc_ipv6.src,
1192 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1193 			       sizeof(key->enc_ipv6.src));
1194 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1195 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1196 			       &mask->enc_ipv6.dst,
1197 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1198 			       sizeof(key->enc_ipv6.dst));
1199 	}
1200 
1201 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1202 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1203 		       sizeof(key->enc_key_id.keyid));
1204 
1205 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1206 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1207 		       sizeof(key->enc_tp.src));
1208 
1209 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1210 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1211 		       sizeof(key->enc_tp.dst));
1212 
1213 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1214 
1215 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1216 		ret = fl_set_enc_opt(tb, key, mask, extack);
1217 		if (ret)
1218 			return ret;
1219 	}
1220 
1221 	if (tb[TCA_FLOWER_KEY_FLAGS])
1222 		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1223 
1224 	return ret;
1225 }
1226 
1227 static void fl_mask_copy(struct fl_flow_mask *dst,
1228 			 struct fl_flow_mask *src)
1229 {
1230 	const void *psrc = fl_key_get_start(&src->key, src);
1231 	void *pdst = fl_key_get_start(&dst->key, src);
1232 
1233 	memcpy(pdst, psrc, fl_mask_range(src));
1234 	dst->range = src->range;
1235 }
1236 
1237 static const struct rhashtable_params fl_ht_params = {
1238 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1239 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1240 	.automatic_shrinking = true,
1241 };
1242 
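/* Every mask owns a filter hash table that hashes only the byte range of
 * the masked key which is actually used, so key_len and key_offset are
 * adjusted per mask before the table is initialized.
 */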
1243 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1244 {
1245 	mask->filter_ht_params = fl_ht_params;
1246 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1247 	mask->filter_ht_params.key_offset += mask->range.start;
1248 
1249 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1250 }
1251 
1252 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1253 #define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
1254 
1255 #define FL_KEY_IS_MASKED(mask, member)						\
1256 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1257 		   0, FL_KEY_MEMBER_SIZE(member))				\
1258 
1259 #define FL_KEY_SET(keys, cnt, id, member)					\
1260 	do {									\
1261 		keys[cnt].key_id = id;						\
1262 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1263 		cnt++;								\
1264 	} while (0)
1265 
1266 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1267 	do {									\
1268 		if (FL_KEY_IS_MASKED(mask, member))				\
1269 			FL_KEY_SET(keys, cnt, id, member);			\
1270 	} while (0)
1271 
1272 static void fl_init_dissector(struct flow_dissector *dissector,
1273 			      struct fl_flow_key *mask)
1274 {
1275 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1276 	size_t cnt = 0;
1277 
1278 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1279 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1280 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1281 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1282 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1283 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1284 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1285 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1286 	if (FL_KEY_IS_MASKED(mask, tp) ||
1287 	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1288 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1289 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1290 			     FLOW_DISSECTOR_KEY_IP, ip);
1291 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1292 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1293 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1294 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1295 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1296 			     FLOW_DISSECTOR_KEY_ARP, arp);
1297 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1298 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1299 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1300 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1301 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1302 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1303 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1304 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1305 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1306 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1307 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1308 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1309 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1310 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1311 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1312 			   enc_control);
1313 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1314 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1315 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1316 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1317 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1318 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1319 
1320 	skb_flow_dissector_init(dissector, keys, cnt);
1321 }
1322 
1323 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1324 					       struct fl_flow_mask *mask)
1325 {
1326 	struct fl_flow_mask *newmask;
1327 	int err;
1328 
1329 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1330 	if (!newmask)
1331 		return ERR_PTR(-ENOMEM);
1332 
1333 	fl_mask_copy(newmask, mask);
1334 
1335 	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1336 	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
1337 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1338 
1339 	err = fl_init_mask_hashtable(newmask);
1340 	if (err)
1341 		goto errout_free;
1342 
1343 	fl_init_dissector(&newmask->dissector, &newmask->key);
1344 
1345 	INIT_LIST_HEAD_RCU(&newmask->filters);
1346 
1347 	refcount_set(&newmask->refcnt, 1);
1348 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1349 				      &newmask->ht_node, mask_ht_params);
1350 	if (err)
1351 		goto errout_destroy;
1352 
1353 	/* Wait until any potential concurrent users of mask are finished */
1354 	synchronize_rcu();
1355 
1356 	spin_lock(&head->masks_lock);
1357 	list_add_tail_rcu(&newmask->list, &head->masks);
1358 	spin_unlock(&head->masks_lock);
1359 
1360 	return newmask;
1361 
1362 errout_destroy:
1363 	rhashtable_destroy(&newmask->ht);
1364 errout_free:
1365 	kfree(newmask);
1366 
1367 	return ERR_PTR(err);
1368 }
1369 
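/* Find a mask identical to the one needed by the new filter and take a
 * reference on it, or create a new one. Concurrent creators are serialized
 * by temporarily inserting the caller's mask into the masks hash table.
 */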
1370 static int fl_check_assign_mask(struct cls_fl_head *head,
1371 				struct cls_fl_filter *fnew,
1372 				struct cls_fl_filter *fold,
1373 				struct fl_flow_mask *mask)
1374 {
1375 	struct fl_flow_mask *newmask;
1376 	int ret = 0;
1377 
1378 	rcu_read_lock();
1379 
1380 	/* Insert mask as a temporary node to prevent concurrent creation of a
1381 	 * mask with the same key. Any concurrent lookups with the same key will
1382 	 * return -EAGAIN because the mask's refcnt is zero. It is safe to insert
1383 	 * the caller-allocated temporary 'mask' into the masks hash table
1384 	 * because we call synchronize_rcu() before returning from this function
1385 	 * (either in case of error or after replacing it with the heap-allocated
1386 	 * mask in fl_create_new_mask()).
1387 	 */
1388 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1389 						       &mask->ht_node,
1390 						       mask_ht_params);
1391 	if (!fnew->mask) {
1392 		rcu_read_unlock();
1393 
1394 		if (fold) {
1395 			ret = -EINVAL;
1396 			goto errout_cleanup;
1397 		}
1398 
1399 		newmask = fl_create_new_mask(head, mask);
1400 		if (IS_ERR(newmask)) {
1401 			ret = PTR_ERR(newmask);
1402 			goto errout_cleanup;
1403 		}
1404 
1405 		fnew->mask = newmask;
1406 		return 0;
1407 	} else if (IS_ERR(fnew->mask)) {
1408 		ret = PTR_ERR(fnew->mask);
1409 	} else if (fold && fold->mask != fnew->mask) {
1410 		ret = -EINVAL;
1411 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1412 		/* Mask was deleted concurrently, try again */
1413 		ret = -EAGAIN;
1414 	}
1415 	rcu_read_unlock();
1416 	return ret;
1417 
1418 errout_cleanup:
1419 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1420 			       mask_ht_params);
1421 	/* Wait until any potential concurrent users of mask are finished */
1422 	synchronize_rcu();
1423 	return ret;
1424 }
1425 
1426 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1427 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1428 			unsigned long base, struct nlattr **tb,
1429 			struct nlattr *est, bool ovr,
1430 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1431 			struct netlink_ext_ack *extack)
1432 {
1433 	int err;
1434 
1435 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1436 				extack);
1437 	if (err < 0)
1438 		return err;
1439 
1440 	if (tb[TCA_FLOWER_CLASSID]) {
1441 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1442 		if (!rtnl_held)
1443 			rtnl_lock();
1444 		tcf_bind_filter(tp, &f->res, base);
1445 		if (!rtnl_held)
1446 			rtnl_unlock();
1447 	}
1448 
1449 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1450 	if (err)
1451 		return err;
1452 
1453 	fl_mask_update_range(mask);
1454 	fl_set_masked_key(&f->mkey, &f->key, mask);
1455 
1456 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1457 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1458 		return -EINVAL;
1459 	}
1460 
1461 	return 0;
1462 }
1463 
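/* Insert the new filter into its mask's hash table. -EEXIST is tolerated
 * when an old filter is being overwritten; *in_ht tells the caller whether
 * the filter actually made it into the table.
 */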
1464 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1465 			       struct cls_fl_filter *fold,
1466 			       bool *in_ht)
1467 {
1468 	struct fl_flow_mask *mask = fnew->mask;
1469 	int err;
1470 
1471 	err = rhashtable_lookup_insert_fast(&mask->ht,
1472 					    &fnew->ht_node,
1473 					    mask->filter_ht_params);
1474 	if (err) {
1475 		*in_ht = false;
1476 		/* It is okay if filter with same key exists when
1477 		 * overwriting.
1478 		 */
1479 		return fold && err == -EEXIST ? 0 : err;
1480 	}
1481 
1482 	*in_ht = true;
1483 	return 0;
1484 }
1485 
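/* Create a new filter or replace an existing one (fold). The new filter is
 * hashed and offloaded to hardware first and only then made visible in the
 * handle IDR and filter list under tp->lock; -EAGAIN is returned if tp or
 * fold were concurrently deleted so the caller can retry.
 */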
1486 static int fl_change(struct net *net, struct sk_buff *in_skb,
1487 		     struct tcf_proto *tp, unsigned long base,
1488 		     u32 handle, struct nlattr **tca,
1489 		     void **arg, bool ovr, bool rtnl_held,
1490 		     struct netlink_ext_ack *extack)
1491 {
1492 	struct cls_fl_head *head = fl_head_dereference(tp);
1493 	struct cls_fl_filter *fold = *arg;
1494 	struct cls_fl_filter *fnew;
1495 	struct fl_flow_mask *mask;
1496 	struct nlattr **tb;
1497 	bool in_ht;
1498 	int err;
1499 
1500 	if (!tca[TCA_OPTIONS]) {
1501 		err = -EINVAL;
1502 		goto errout_fold;
1503 	}
1504 
1505 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1506 	if (!mask) {
1507 		err = -ENOBUFS;
1508 		goto errout_fold;
1509 	}
1510 
1511 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1512 	if (!tb) {
1513 		err = -ENOBUFS;
1514 		goto errout_mask_alloc;
1515 	}
1516 
1517 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1518 					  tca[TCA_OPTIONS], fl_policy, NULL);
1519 	if (err < 0)
1520 		goto errout_tb;
1521 
1522 	if (fold && handle && fold->handle != handle) {
1523 		err = -EINVAL;
1524 		goto errout_tb;
1525 	}
1526 
1527 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1528 	if (!fnew) {
1529 		err = -ENOBUFS;
1530 		goto errout_tb;
1531 	}
1532 	INIT_LIST_HEAD(&fnew->hw_list);
1533 	refcount_set(&fnew->refcnt, 1);
1534 
1535 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1536 	if (err < 0)
1537 		goto errout;
1538 
1539 	if (tb[TCA_FLOWER_FLAGS]) {
1540 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1541 
1542 		if (!tc_flags_valid(fnew->flags)) {
1543 			err = -EINVAL;
1544 			goto errout;
1545 		}
1546 	}
1547 
1548 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1549 			   tp->chain->tmplt_priv, rtnl_held, extack);
1550 	if (err)
1551 		goto errout;
1552 
1553 	err = fl_check_assign_mask(head, fnew, fold, mask);
1554 	if (err)
1555 		goto errout;
1556 
1557 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1558 	if (err)
1559 		goto errout_mask;
1560 
1561 	if (!tc_skip_hw(fnew->flags)) {
1562 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1563 		if (err)
1564 			goto errout_ht;
1565 	}
1566 
1567 	if (!tc_in_hw(fnew->flags))
1568 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1569 
1570 	spin_lock(&tp->lock);
1571 
1572 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
1573 	 * the proto again or create a new one, if necessary.
1574 	 */
1575 	if (tp->deleting) {
1576 		err = -EAGAIN;
1577 		goto errout_hw;
1578 	}
1579 
1580 	if (fold) {
1581 		/* Fold filter was deleted concurrently. Retry lookup. */
1582 		if (fold->deleted) {
1583 			err = -EAGAIN;
1584 			goto errout_hw;
1585 		}
1586 
1587 		fnew->handle = handle;
1588 
1589 		if (!in_ht) {
1590 			struct rhashtable_params params =
1591 				fnew->mask->filter_ht_params;
1592 
1593 			err = rhashtable_insert_fast(&fnew->mask->ht,
1594 						     &fnew->ht_node,
1595 						     params);
1596 			if (err)
1597 				goto errout_hw;
1598 			in_ht = true;
1599 		}
1600 
1601 		refcount_inc(&fnew->refcnt);
1602 		rhashtable_remove_fast(&fold->mask->ht,
1603 				       &fold->ht_node,
1604 				       fold->mask->filter_ht_params);
1605 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1606 		list_replace_rcu(&fold->list, &fnew->list);
1607 		fold->deleted = true;
1608 
1609 		spin_unlock(&tp->lock);
1610 
1611 		fl_mask_put(head, fold->mask);
1612 		if (!tc_skip_hw(fold->flags))
1613 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1614 		tcf_unbind_filter(tp, &fold->res);
1615 		/* Caller holds reference to fold, so refcnt is always > 0
1616 		 * after this.
1617 		 */
1618 		refcount_dec(&fold->refcnt);
1619 		__fl_put(fold);
1620 	} else {
1621 		if (handle) {
1622 			/* The user specified a handle that does not exist yet. */
1623 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1624 					    handle, GFP_ATOMIC);
1625 
1626 			/* A filter with the specified handle was concurrently
1627 			 * inserted after the initial check in cls_api. This is
1628 			 * not necessarily an error if NLM_F_EXCL is not set in
1629 			 * the message flags. Returning EAGAIN will cause cls_api
1630 			 * to try to update the concurrently inserted rule.
1631 			 */
1632 			if (err == -ENOSPC)
1633 				err = -EAGAIN;
1634 		} else {
1635 			handle = 1;
1636 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1637 					    INT_MAX, GFP_ATOMIC);
1638 		}
1639 		if (err)
1640 			goto errout_hw;
1641 
1642 		refcount_inc(&fnew->refcnt);
1643 		fnew->handle = handle;
1644 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1645 		spin_unlock(&tp->lock);
1646 	}
1647 
1648 	*arg = fnew;
1649 
1650 	kfree(tb);
1651 	kfree(mask);
1652 	return 0;
1653 
1654 errout_ht:
1655 	spin_lock(&tp->lock);
1656 errout_hw:
1657 	fnew->deleted = true;
1658 	spin_unlock(&tp->lock);
1659 	if (!tc_skip_hw(fnew->flags))
1660 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1661 	if (in_ht)
1662 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1663 				       fnew->mask->filter_ht_params);
1664 errout_mask:
1665 	fl_mask_put(head, fnew->mask);
1666 errout:
1667 	__fl_put(fnew);
1668 errout_tb:
1669 	kfree(tb);
1670 errout_mask_alloc:
1671 	kfree(mask);
1672 errout_fold:
1673 	if (fold)
1674 		__fl_put(fold);
1675 	return err;
1676 }
1677 
1678 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1679 		     bool rtnl_held, struct netlink_ext_ack *extack)
1680 {
1681 	struct cls_fl_head *head = fl_head_dereference(tp);
1682 	struct cls_fl_filter *f = arg;
1683 	bool last_on_mask;
1684 	int err = 0;
1685 
1686 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1687 	*last = list_empty(&head->masks);
1688 	__fl_put(f);
1689 
1690 	return err;
1691 }
1692 
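/* Walk all filters, invoking arg->fn() on each one. arg->cookie is used as
 * the iteration cursor into the handle idr; the temporary reference to each
 * filter is dropped after the callback returns, and the walk stops when the
 * callback returns a negative value.
 */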
1693 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1694 		    bool rtnl_held)
1695 {
1696 	struct cls_fl_filter *f;
1697 
1698 	arg->count = arg->skip;
1699 
1700 	while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
1701 		if (arg->fn(tp, f, arg) < 0) {
1702 			__fl_put(f);
1703 			arg->stop = 1;
1704 			break;
1705 		}
1706 		__fl_put(f);
1707 		arg->cookie++;
1708 		arg->count++;
1709 	}
1710 }
1711 
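/* Return the next filter on the hw_filters list after @f (or the first one
 * when @f is NULL) and take a reference to it. Filters already marked as
 * deleted are skipped when @add is true. Returns NULL once the end of the
 * list is reached.
 */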
1712 static struct cls_fl_filter *
1713 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
1714 {
1715 	struct cls_fl_head *head = fl_head_dereference(tp);
1716 
1717 	spin_lock(&tp->lock);
1718 	if (list_empty(&head->hw_filters)) {
1719 		spin_unlock(&tp->lock);
1720 		return NULL;
1721 	}
1722 
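	/* On the first call, construct a bogus entry from the list head itself
	 * so that list_for_each_entry_continue() below starts at the first
	 * real filter.
	 */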
1723 	if (!f)
1724 		f = list_entry(&head->hw_filters, struct cls_fl_filter,
1725 			       hw_list);
1726 	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
1727 		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
1728 			spin_unlock(&tp->lock);
1729 			return f;
1730 		}
1731 	}
1732 
1733 	spin_unlock(&tp->lock);
1734 	return NULL;
1735 }
1736 
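/* Replay every filter on the hw_filters list through the block callback @cb,
 * either installing (TC_CLSFLOWER_REPLACE) or removing (TC_CLSFLOWER_DESTROY)
 * it, and update each filter's in_hw_count. Failures for filters that can
 * fall back to software are ignored.
 */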
1737 static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1738 			void *cb_priv, struct netlink_ext_ack *extack)
1739 {
1740 	struct tc_cls_flower_offload cls_flower = {};
1741 	struct tcf_block *block = tp->chain->block;
1742 	struct cls_fl_filter *f = NULL;
1743 	int err;
1744 
1745 	/* The hw_filters list can only be changed by hw offload functions after
1746 	 * obtaining the rtnl lock. Make sure it is not changed while reoffload is
1747 	 * iterating over it.
1748 	 */
1749 	ASSERT_RTNL();
1750 
1751 	while ((f = fl_get_next_hw_filter(tp, f, add))) {
1752 		cls_flower.rule =
1753 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1754 		if (!cls_flower.rule) {
1755 			__fl_put(f);
1756 			return -ENOMEM;
1757 		}
1758 
1759 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1760 					   extack);
1761 		cls_flower.command = add ?
1762 			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1763 		cls_flower.cookie = (unsigned long)f;
1764 		cls_flower.rule->match.dissector = &f->mask->dissector;
1765 		cls_flower.rule->match.mask = &f->mask->key;
1766 		cls_flower.rule->match.key = &f->mkey;
1767 
1768 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1769 		if (err) {
1770 			kfree(cls_flower.rule);
1771 			if (tc_skip_sw(f->flags)) {
1772 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1773 				__fl_put(f);
1774 				return err;
1775 			}
1776 			goto next_flow;
1777 		}
1778 
1779 		cls_flower.classid = f->res.classid;
1780 
1781 		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1782 		kfree(cls_flower.rule);
1783 
1784 		if (err) {
1785 			if (add && tc_skip_sw(f->flags)) {
1786 				__fl_put(f);
1787 				return err;
1788 			}
1789 			goto next_flow;
1790 		}
1791 
1792 		spin_lock(&tp->lock);
1793 		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1794 					  add);
1795 		spin_unlock(&tp->lock);
1796 next_flow:
1797 		__fl_put(f);
1798 	}
1799 
1800 	return 0;
1801 }
1802 
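/* Offer a newly created template to the drivers bound to the chain's block
 * via TC_CLSFLOWER_TMPLT_CREATE. Driver return values are ignored.
 */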
1803 static int fl_hw_create_tmplt(struct tcf_chain *chain,
1804 			      struct fl_flow_tmplt *tmplt)
1805 {
1806 	struct tc_cls_flower_offload cls_flower = {};
1807 	struct tcf_block *block = chain->block;
1808 
1809 	cls_flower.rule = flow_rule_alloc(0);
1810 	if (!cls_flower.rule)
1811 		return -ENOMEM;
1812 
1813 	cls_flower.common.chain_index = chain->index;
1814 	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1815 	cls_flower.cookie = (unsigned long) tmplt;
1816 	cls_flower.rule->match.dissector = &tmplt->dissector;
1817 	cls_flower.rule->match.mask = &tmplt->mask;
1818 	cls_flower.rule->match.key = &tmplt->dummy_key;
1819 
1820 	/* We don't care if any of the drivers fails to handle this
1821 	 * call. It serves merely as a hint to them.
1822 	 */
1823 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1824 	kfree(cls_flower.rule);
1825 
1826 	return 0;
1827 }
1828 
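/* Notify the drivers bound to the chain's block that the template is being
 * destroyed (TC_CLSFLOWER_TMPLT_DESTROY).
 */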
1829 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1830 				struct fl_flow_tmplt *tmplt)
1831 {
1832 	struct tc_cls_flower_offload cls_flower = {};
1833 	struct tcf_block *block = chain->block;
1834 
1835 	cls_flower.common.chain_index = chain->index;
1836 	cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1837 	cls_flower.cookie = (unsigned long) tmplt;
1838 
1839 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1840 }
1841 
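/* Parse TCA_OPTIONS into a new chain template: fill the dummy key and mask,
 * initialize the template's dissector and offer it to the hardware.
 * Returns the template on success or an ERR_PTR() on failure.
 */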
1842 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1843 			     struct nlattr **tca,
1844 			     struct netlink_ext_ack *extack)
1845 {
1846 	struct fl_flow_tmplt *tmplt;
1847 	struct nlattr **tb;
1848 	int err;
1849 
1850 	if (!tca[TCA_OPTIONS])
1851 		return ERR_PTR(-EINVAL);
1852 
1853 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1854 	if (!tb)
1855 		return ERR_PTR(-ENOBUFS);
1856 	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
1857 					  tca[TCA_OPTIONS], fl_policy, NULL);
1858 	if (err)
1859 		goto errout_tb;
1860 
1861 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1862 	if (!tmplt) {
1863 		err = -ENOMEM;
1864 		goto errout_tb;
1865 	}
1866 	tmplt->chain = chain;
1867 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1868 	if (err)
1869 		goto errout_tmplt;
1870 
1871 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1872 
1873 	err = fl_hw_create_tmplt(chain, tmplt);
1874 	if (err)
1875 		goto errout_tmplt;
1876 
1877 	kfree(tb);
1878 	return tmplt;
1879 
1880 errout_tmplt:
1881 	kfree(tmplt);
1882 errout_tb:
1883 	kfree(tb);
1884 	return ERR_PTR(err);
1885 }
1886 
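/* Destroy a chain template: notify the hardware and free it. */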
1887 static void fl_tmplt_destroy(void *tmplt_priv)
1888 {
1889 	struct fl_flow_tmplt *tmplt = tmplt_priv;
1890 
1891 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1892 	kfree(tmplt);
1893 }
1894 
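/* Put a key/mask pair into the netlink message. Nothing is emitted when the
 * mask is all zeroes, and the mask attribute is omitted when @mask_type is
 * TCA_FLOWER_UNSPEC.
 */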
1895 static int fl_dump_key_val(struct sk_buff *skb,
1896 			   void *val, int val_type,
1897 			   void *mask, int mask_type, int len)
1898 {
1899 	int err;
1900 
1901 	if (!memchr_inv(mask, 0, len))
1902 		return 0;
1903 	err = nla_put(skb, val_type, len, val);
1904 	if (err)
1905 		return err;
1906 	if (mask_type != TCA_FLOWER_UNSPEC) {
1907 		err = nla_put(skb, mask_type, len, mask);
1908 		if (err)
1909 			return err;
1910 	}
1911 	return 0;
1912 }
1913 
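/* Dump the destination and source port range endpoints (min/max). */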
1914 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1915 				  struct fl_flow_key *mask)
1916 {
1917 	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1918 			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1919 			    sizeof(key->tp_min.dst)) ||
1920 	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1921 			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1922 			    sizeof(key->tp_max.dst)) ||
1923 	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1924 			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1925 			    sizeof(key->tp_min.src)) ||
1926 	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1927 			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1928 			    sizeof(key->tp_max.src)))
1929 		return -1;
1930 
1931 	return 0;
1932 }
1933 
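/* Dump the MPLS TTL, TC, label and BOS fields whose mask is non-zero. */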
1934 static int fl_dump_key_mpls(struct sk_buff *skb,
1935 			    struct flow_dissector_key_mpls *mpls_key,
1936 			    struct flow_dissector_key_mpls *mpls_mask)
1937 {
1938 	int err;
1939 
1940 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1941 		return 0;
1942 	if (mpls_mask->mpls_ttl) {
1943 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1944 				 mpls_key->mpls_ttl);
1945 		if (err)
1946 			return err;
1947 	}
1948 	if (mpls_mask->mpls_tc) {
1949 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1950 				 mpls_key->mpls_tc);
1951 		if (err)
1952 			return err;
1953 	}
1954 	if (mpls_mask->mpls_label) {
1955 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1956 				  mpls_key->mpls_label);
1957 		if (err)
1958 			return err;
1959 	}
1960 	if (mpls_mask->mpls_bos) {
1961 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1962 				 mpls_key->mpls_bos);
1963 		if (err)
1964 			return err;
1965 	}
1966 	return 0;
1967 }
1968 
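/* Dump the IP TOS and TTL key/mask pairs, using the encapsulation attribute
 * IDs when @encap is true.
 */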
1969 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
1970 			  struct flow_dissector_key_ip *key,
1971 			  struct flow_dissector_key_ip *mask)
1972 {
1973 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1974 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1975 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1976 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1977 
1978 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1979 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
1980 		return -1;
1981 
1982 	return 0;
1983 }
1984 
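/* Dump the VLAN id and priority; the attribute IDs passed in select whether
 * the outer or the inner (customer) tag is being dumped.
 */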
1985 static int fl_dump_key_vlan(struct sk_buff *skb,
1986 			    int vlan_id_key, int vlan_prio_key,
1987 			    struct flow_dissector_key_vlan *vlan_key,
1988 			    struct flow_dissector_key_vlan *vlan_mask)
1989 {
1990 	int err;
1991 
1992 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1993 		return 0;
1994 	if (vlan_mask->vlan_id) {
1995 		err = nla_put_u16(skb, vlan_id_key,
1996 				  vlan_key->vlan_id);
1997 		if (err)
1998 			return err;
1999 	}
2000 	if (vlan_mask->vlan_priority) {
2001 		err = nla_put_u8(skb, vlan_prio_key,
2002 				 vlan_key->vlan_priority);
2003 		if (err)
2004 			return err;
2005 	}
2006 	return 0;
2007 }
2008 
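/* Translate a single flag bit from the flow dissector representation to the
 * flower netlink representation, for both key and mask.
 */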
2009 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2010 			    u32 *flower_key, u32 *flower_mask,
2011 			    u32 flower_flag_bit, u32 dissector_flag_bit)
2012 {
2013 	if (dissector_mask & dissector_flag_bit) {
2014 		*flower_mask |= flower_flag_bit;
2015 		if (dissector_key & dissector_flag_bit)
2016 			*flower_key |= flower_flag_bit;
2017 	}
2018 }
2019 
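/* Dump the fragmentation control flags as a big-endian key/mask pair. */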
2020 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2021 {
2022 	u32 key, mask;
2023 	__be32 _key, _mask;
2024 	int err;
2025 
2026 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2027 		return 0;
2028 
2029 	key = 0;
2030 	mask = 0;
2031 
2032 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2033 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2034 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2035 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2036 			FLOW_DIS_FIRST_FRAG);
2037 
2038 	_key = cpu_to_be32(key);
2039 	_mask = cpu_to_be32(mask);
2040 
2041 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2042 	if (err)
2043 		return err;
2044 
2045 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
2046 }
2047 
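/* Dump geneve tunnel options as a nested attribute of class/type/data
 * triplets, one per option.
 */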
2048 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2049 				  struct flow_dissector_key_enc_opts *enc_opts)
2050 {
2051 	struct geneve_opt *opt;
2052 	struct nlattr *nest;
2053 	int opt_off = 0;
2054 
2055 	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2056 	if (!nest)
2057 		goto nla_put_failure;
2058 
2059 	while (enc_opts->len > opt_off) {
2060 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2061 
2062 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2063 				 opt->opt_class))
2064 			goto nla_put_failure;
2065 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2066 			       opt->type))
2067 			goto nla_put_failure;
2068 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2069 			    opt->length * 4, opt->opt_data))
2070 			goto nla_put_failure;
2071 
2072 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2073 	}
2074 	nla_nest_end(skb, nest);
2075 	return 0;
2076 
2077 nla_put_failure:
2078 	nla_nest_cancel(skb, nest);
2079 	return -EMSGSIZE;
2080 }
2081 
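/* Dump tunnel options of the given attribute type into a nested attribute.
 * Only geneve options are currently supported.
 */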
2082 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2083 			       struct flow_dissector_key_enc_opts *enc_opts)
2084 {
2085 	struct nlattr *nest;
2086 	int err;
2087 
2088 	if (!enc_opts->len)
2089 		return 0;
2090 
2091 	nest = nla_nest_start_noflag(skb, enc_opt_type);
2092 	if (!nest)
2093 		goto nla_put_failure;
2094 
2095 	switch (enc_opts->dst_opt_type) {
2096 	case TUNNEL_GENEVE_OPT:
2097 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2098 		if (err)
2099 			goto nla_put_failure;
2100 		break;
2101 	default:
2102 		goto nla_put_failure;
2103 	}
2104 	nla_nest_end(skb, nest);
2105 	return 0;
2106 
2107 nla_put_failure:
2108 	nla_nest_cancel(skb, nest);
2109 	return -EMSGSIZE;
2110 }
2111 
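/* Dump both the tunnel option key and its mask. */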
2112 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2113 			       struct flow_dissector_key_enc_opts *key_opts,
2114 			       struct flow_dissector_key_enc_opts *msk_opts)
2115 {
2116 	int err;
2117 
2118 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2119 	if (err)
2120 		return err;
2121 
2122 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2123 }
2124 
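/* Dump a complete flower key/mask pair into the netlink message, emitting
 * only the fields whose mask is non-zero and only the attributes that make
 * sense for the matched protocols.
 */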
2125 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2126 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2127 {
2128 	if (mask->indev_ifindex) {
2129 		struct net_device *dev;
2130 
2131 		dev = __dev_get_by_index(net, key->indev_ifindex);
2132 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2133 			goto nla_put_failure;
2134 	}
2135 
2136 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2137 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2138 			    sizeof(key->eth.dst)) ||
2139 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2140 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2141 			    sizeof(key->eth.src)) ||
2142 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2143 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2144 			    sizeof(key->basic.n_proto)))
2145 		goto nla_put_failure;
2146 
2147 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2148 		goto nla_put_failure;
2149 
2150 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2151 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2152 		goto nla_put_failure;
2153 
2154 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2155 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2156 			     &key->cvlan, &mask->cvlan) ||
2157 	    (mask->cvlan.vlan_tpid &&
2158 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2159 			  key->cvlan.vlan_tpid)))
2160 		goto nla_put_failure;
2161 
2162 	if (mask->basic.n_proto) {
2163 		if (mask->cvlan.vlan_tpid) {
2164 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2165 					 key->basic.n_proto))
2166 				goto nla_put_failure;
2167 		} else if (mask->vlan.vlan_tpid) {
2168 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2169 					 key->basic.n_proto))
2170 				goto nla_put_failure;
2171 		}
2172 	}
2173 
2174 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2175 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2176 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2177 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2178 			    sizeof(key->basic.ip_proto)) ||
2179 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2180 		goto nla_put_failure;
2181 
2182 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2183 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2184 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2185 			     sizeof(key->ipv4.src)) ||
2186 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2187 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2188 			     sizeof(key->ipv4.dst))))
2189 		goto nla_put_failure;
2190 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2191 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2192 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2193 				  sizeof(key->ipv6.src)) ||
2194 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2195 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2196 				  sizeof(key->ipv6.dst))))
2197 		goto nla_put_failure;
2198 
2199 	if (key->basic.ip_proto == IPPROTO_TCP &&
2200 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2201 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2202 			     sizeof(key->tp.src)) ||
2203 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2204 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2205 			     sizeof(key->tp.dst)) ||
2206 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2207 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2208 			     sizeof(key->tcp.flags))))
2209 		goto nla_put_failure;
2210 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2211 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2212 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2213 				  sizeof(key->tp.src)) ||
2214 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2215 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2216 				  sizeof(key->tp.dst))))
2217 		goto nla_put_failure;
2218 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2219 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2220 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2221 				  sizeof(key->tp.src)) ||
2222 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2223 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2224 				  sizeof(key->tp.dst))))
2225 		goto nla_put_failure;
2226 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2227 		 key->basic.ip_proto == IPPROTO_ICMP &&
2228 		 (fl_dump_key_val(skb, &key->icmp.type,
2229 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2230 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2231 				  sizeof(key->icmp.type)) ||
2232 		  fl_dump_key_val(skb, &key->icmp.code,
2233 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2234 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2235 				  sizeof(key->icmp.code))))
2236 		goto nla_put_failure;
2237 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2238 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2239 		 (fl_dump_key_val(skb, &key->icmp.type,
2240 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2241 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2242 				  sizeof(key->icmp.type)) ||
2243 		  fl_dump_key_val(skb, &key->icmp.code,
2244 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2245 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2246 				  sizeof(key->icmp.code))))
2247 		goto nla_put_failure;
2248 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2249 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2250 		 (fl_dump_key_val(skb, &key->arp.sip,
2251 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2252 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2253 				  sizeof(key->arp.sip)) ||
2254 		  fl_dump_key_val(skb, &key->arp.tip,
2255 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2256 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2257 				  sizeof(key->arp.tip)) ||
2258 		  fl_dump_key_val(skb, &key->arp.op,
2259 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2260 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2261 				  sizeof(key->arp.op)) ||
2262 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2263 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2264 				  sizeof(key->arp.sha)) ||
2265 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2266 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2267 				  sizeof(key->arp.tha))))
2268 		goto nla_put_failure;
2269 
2270 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2271 	     key->basic.ip_proto == IPPROTO_UDP ||
2272 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2273 	     fl_dump_key_port_range(skb, key, mask))
2274 		goto nla_put_failure;
2275 
2276 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2277 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2278 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2279 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2280 			    sizeof(key->enc_ipv4.src)) ||
2281 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2282 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2283 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2284 			     sizeof(key->enc_ipv4.dst))))
2285 		goto nla_put_failure;
2286 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2287 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2288 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2289 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2290 			    sizeof(key->enc_ipv6.src)) ||
2291 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2292 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2293 				 &mask->enc_ipv6.dst,
2294 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2295 			    sizeof(key->enc_ipv6.dst))))
2296 		goto nla_put_failure;
2297 
2298 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2299 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2300 			    sizeof(key->enc_key_id)) ||
2301 	    fl_dump_key_val(skb, &key->enc_tp.src,
2302 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2303 			    &mask->enc_tp.src,
2304 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2305 			    sizeof(key->enc_tp.src)) ||
2306 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2307 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2308 			    &mask->enc_tp.dst,
2309 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2310 			    sizeof(key->enc_tp.dst)) ||
2311 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2312 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2313 		goto nla_put_failure;
2314 
2315 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2316 		goto nla_put_failure;
2317 
2318 	return 0;
2319 
2320 nla_put_failure:
2321 	return -EMSGSIZE;
2322 }
2323 
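/* Dump a single filter. The classid, key/mask and flags are read under
 * tp->lock so they stay consistent with concurrent unlocked updates;
 * hardware stats are refreshed before the actions are dumped.
 */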
2324 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2325 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2326 {
2327 	struct cls_fl_filter *f = fh;
2328 	struct nlattr *nest;
2329 	struct fl_flow_key *key, *mask;
2330 	bool skip_hw;
2331 
2332 	if (!f)
2333 		return skb->len;
2334 
2335 	t->tcm_handle = f->handle;
2336 
2337 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2338 	if (!nest)
2339 		goto nla_put_failure;
2340 
2341 	spin_lock(&tp->lock);
2342 
2343 	if (f->res.classid &&
2344 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2345 		goto nla_put_failure_locked;
2346 
2347 	key = &f->key;
2348 	mask = &f->mask->key;
2349 	skip_hw = tc_skip_hw(f->flags);
2350 
2351 	if (fl_dump_key(skb, net, key, mask))
2352 		goto nla_put_failure_locked;
2353 
2354 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2355 		goto nla_put_failure_locked;
2356 
2357 	spin_unlock(&tp->lock);
2358 
2359 	if (!skip_hw)
2360 		fl_hw_update_stats(tp, f, rtnl_held);
2361 
2362 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2363 		goto nla_put_failure;
2364 
2365 	if (tcf_exts_dump(skb, &f->exts))
2366 		goto nla_put_failure;
2367 
2368 	nla_nest_end(skb, nest);
2369 
2370 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2371 		goto nla_put_failure;
2372 
2373 	return skb->len;
2374 
2375 nla_put_failure_locked:
2376 	spin_unlock(&tp->lock);
2377 nla_put_failure:
2378 	nla_nest_cancel(skb, nest);
2379 	return -1;
2380 }
2381 
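/* Dump a chain template's dummy key and mask under TCA_OPTIONS. */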
2382 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2383 {
2384 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2385 	struct fl_flow_key *key, *mask;
2386 	struct nlattr *nest;
2387 
2388 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
2389 	if (!nest)
2390 		goto nla_put_failure;
2391 
2392 	key = &tmplt->dummy_key;
2393 	mask = &tmplt->mask;
2394 
2395 	if (fl_dump_key(skb, net, key, mask))
2396 		goto nla_put_failure;
2397 
2398 	nla_nest_end(skb, nest);
2399 
2400 	return skb->len;
2401 
2402 nla_put_failure:
2403 	nla_nest_cancel(skb, nest);
2404 	return -EMSGSIZE;
2405 }
2406 
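/* Cache the class pointer for the filter when its classid matches the class
 * being (un)bound.
 */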
2407 static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2408 {
2409 	struct cls_fl_filter *f = fh;
2410 
2411 	if (f && f->res.classid == classid)
2412 		f->res.class = cl;
2413 }
2414 
2415 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2416 	.kind		= "flower",
2417 	.classify	= fl_classify,
2418 	.init		= fl_init,
2419 	.destroy	= fl_destroy,
2420 	.get		= fl_get,
2421 	.put		= fl_put,
2422 	.change		= fl_change,
2423 	.delete		= fl_delete,
2424 	.walk		= fl_walk,
2425 	.reoffload	= fl_reoffload,
2426 	.dump		= fl_dump,
2427 	.bind_class	= fl_bind_class,
2428 	.tmplt_create	= fl_tmplt_create,
2429 	.tmplt_destroy	= fl_tmplt_destroy,
2430 	.tmplt_dump	= fl_tmplt_dump,
2431 	.owner		= THIS_MODULE,
2432 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2433 };
2434 
2435 static int __init cls_fl_init(void)
2436 {
2437 	return register_tcf_proto_ops(&cls_fl_ops);
2438 }
2439 
2440 static void __exit cls_fl_exit(void)
2441 {
2442 	unregister_tcf_proto_ops(&cls_fl_ops);
2443 }
2444 
2445 module_init(cls_fl_init);
2446 module_exit(cls_fl_exit);
2447 
2448 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2449 MODULE_DESCRIPTION("Flower classifier");
2450 MODULE_LICENSE("GPL v2");
2451