xref: /openbmc/linux/net/sched/cls_flower.c (revision 0c7beb2d)
1 /*
2  * net/sched/cls_flower.c		Flower classifier
3  *
4  * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/rhashtable.h>
16 #include <linux/workqueue.h>
17 #include <linux/refcount.h>
18 
19 #include <linux/if_ether.h>
20 #include <linux/in6.h>
21 #include <linux/ip.h>
22 #include <linux/mpls.h>
23 
24 #include <net/sch_generic.h>
25 #include <net/pkt_cls.h>
26 #include <net/ip.h>
27 #include <net/flow_dissector.h>
28 #include <net/geneve.h>
29 
30 #include <net/dst.h>
31 #include <net/dst_metadata.h>
32 
33 struct fl_flow_key {
34 	int	indev_ifindex;
35 	struct flow_dissector_key_control control;
36 	struct flow_dissector_key_control enc_control;
37 	struct flow_dissector_key_basic basic;
38 	struct flow_dissector_key_eth_addrs eth;
39 	struct flow_dissector_key_vlan vlan;
40 	struct flow_dissector_key_vlan cvlan;
41 	union {
42 		struct flow_dissector_key_ipv4_addrs ipv4;
43 		struct flow_dissector_key_ipv6_addrs ipv6;
44 	};
45 	struct flow_dissector_key_ports tp;
46 	struct flow_dissector_key_icmp icmp;
47 	struct flow_dissector_key_arp arp;
48 	struct flow_dissector_key_keyid enc_key_id;
49 	union {
50 		struct flow_dissector_key_ipv4_addrs enc_ipv4;
51 		struct flow_dissector_key_ipv6_addrs enc_ipv6;
52 	};
53 	struct flow_dissector_key_ports enc_tp;
54 	struct flow_dissector_key_mpls mpls;
55 	struct flow_dissector_key_tcp tcp;
56 	struct flow_dissector_key_ip ip;
57 	struct flow_dissector_key_ip enc_ip;
58 	struct flow_dissector_key_enc_opts enc_opts;
59 	struct flow_dissector_key_ports tp_min;
60 	struct flow_dissector_key_ports tp_max;
61 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
62 
63 struct fl_flow_mask_range {
64 	unsigned short int start;
65 	unsigned short int end;
66 };
67 
68 struct fl_flow_mask {
69 	struct fl_flow_key key;
70 	struct fl_flow_mask_range range;
71 	u32 flags;
72 	struct rhash_head ht_node;
73 	struct rhashtable ht;
74 	struct rhashtable_params filter_ht_params;
75 	struct flow_dissector dissector;
76 	struct list_head filters;
77 	struct rcu_work rwork;
78 	struct list_head list;
79 	refcount_t refcnt;
80 };
81 
82 struct fl_flow_tmplt {
83 	struct fl_flow_key dummy_key;
84 	struct fl_flow_key mask;
85 	struct flow_dissector dissector;
86 	struct tcf_chain *chain;
87 };
88 
89 struct cls_fl_head {
90 	struct rhashtable ht;
91 	spinlock_t masks_lock; /* Protect masks list */
92 	struct list_head masks;
93 	struct rcu_work rwork;
94 	struct idr handle_idr;
95 };
96 
97 struct cls_fl_filter {
98 	struct fl_flow_mask *mask;
99 	struct rhash_head ht_node;
100 	struct fl_flow_key mkey;
101 	struct tcf_exts exts;
102 	struct tcf_result res;
103 	struct fl_flow_key key;
104 	struct list_head list;
105 	u32 handle;
106 	u32 flags;
107 	u32 in_hw_count;
108 	struct rcu_work rwork;
109 	struct net_device *hw_dev;
110 	/* The flower classifier is unlocked, which means that its reference
111 	 * counter can be changed concurrently without any kind of external
112 	 * synchronization. Use an atomic reference counter to be concurrency-safe.
113 	 */
114 	refcount_t refcnt;
115 	bool deleted;
116 };
117 
118 static const struct rhashtable_params mask_ht_params = {
119 	.key_offset = offsetof(struct fl_flow_mask, key),
120 	.key_len = sizeof(struct fl_flow_key),
121 	.head_offset = offsetof(struct fl_flow_mask, ht_node),
122 	.automatic_shrinking = true,
123 };
124 
125 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
126 {
127 	return mask->range.end - mask->range.start;
128 }
129 
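/* Compute the byte range [start, end) of the mask that contains any set
 * bits, rounded out to long boundaries so that lookups can compare keys
 * one long at a time. Illustrative example (assuming 64-bit longs): if
 * the first non-zero mask byte sits at offset 13 and the last at offset
 * 41, the resulting range is [8, 48).
 */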
130 static void fl_mask_update_range(struct fl_flow_mask *mask)
131 {
132 	const u8 *bytes = (const u8 *) &mask->key;
133 	size_t size = sizeof(mask->key);
134 	size_t i, first = 0, last;
135 
136 	for (i = 0; i < size; i++) {
137 		if (bytes[i]) {
138 			first = i;
139 			break;
140 		}
141 	}
142 	last = first;
143 	for (i = size - 1; i != first; i--) {
144 		if (bytes[i]) {
145 			last = i;
146 			break;
147 		}
148 	}
149 	mask->range.start = rounddown(first, sizeof(long));
150 	mask->range.end = roundup(last + 1, sizeof(long));
151 }
152 
153 static void *fl_key_get_start(struct fl_flow_key *key,
154 			      const struct fl_flow_mask *mask)
155 {
156 	return (u8 *) key + mask->range.start;
157 }
158 
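/* Build the masked lookup key: AND the dissected key with the mask, one
 * long at a time, over the mask's relevant byte range only. struct
 * fl_flow_key is __aligned(BITS_PER_LONG / 8) and the range is rounded
 * to long boundaries, so the word-wise walk is safe.
 */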
159 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
160 			      struct fl_flow_mask *mask)
161 {
162 	const long *lkey = fl_key_get_start(key, mask);
163 	const long *lmask = fl_key_get_start(&mask->key, mask);
164 	long *lmkey = fl_key_get_start(mkey, mask);
165 	int i;
166 
167 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
168 		*lmkey++ = *lkey++ & *lmask++;
169 }
170 
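/* A mask fits a template iff it does not match on any bit the template
 * leaves unmasked: ~template & mask must be zero for every long in the
 * mask's range. A NULL template fits any mask.
 */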
171 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
172 			       struct fl_flow_mask *mask)
173 {
174 	const long *lmask = fl_key_get_start(&mask->key, mask);
175 	const long *ltmplt;
176 	int i;
177 
178 	if (!tmplt)
179 		return true;
180 	ltmplt = fl_key_get_start(&tmplt->mask, mask);
181 	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
182 		if (~*ltmplt++ & *lmask++)
183 			return false;
184 	}
185 	return true;
186 }
187 
188 static void fl_clear_masked_range(struct fl_flow_key *key,
189 				  struct fl_flow_mask *mask)
190 {
191 	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
192 }
193 
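/* Port-range filters cannot be resolved by the hash lookup alone: the
 * skb-derived key carries a single destination port, not min/max bounds.
 * If the packet's port falls inside the filter's [min, max] range, copy
 * the filter's own masked bounds into mkey so the subsequent hash lookup
 * can match.
 */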
194 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
195 				  struct fl_flow_key *key,
196 				  struct fl_flow_key *mkey)
197 {
198 	__be16 min_mask, max_mask, min_val, max_val;
199 
200 	min_mask = htons(filter->mask->key.tp_min.dst);
201 	max_mask = htons(filter->mask->key.tp_max.dst);
202 	min_val = htons(filter->key.tp_min.dst);
203 	max_val = htons(filter->key.tp_max.dst);
204 
205 	if (min_mask && max_mask) {
206 		if (htons(key->tp.dst) < min_val ||
207 		    htons(key->tp.dst) > max_val)
208 			return false;
209 
210 		/* skb does not have min and max values */
211 		mkey->tp_min.dst = filter->mkey.tp_min.dst;
212 		mkey->tp_max.dst = filter->mkey.tp_max.dst;
213 	}
214 	return true;
215 }
216 
217 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
218 				  struct fl_flow_key *key,
219 				  struct fl_flow_key *mkey)
220 {
221 	__be16 min_mask, max_mask, min_val, max_val;
222 
223 	min_mask = htons(filter->mask->key.tp_min.src);
224 	max_mask = htons(filter->mask->key.tp_max.src);
225 	min_val = htons(filter->key.tp_min.src);
226 	max_val = htons(filter->key.tp_max.src);
227 
228 	if (min_mask && max_mask) {
229 		if (htons(key->tp.src) < min_val ||
230 		    htons(key->tp.src) > max_val)
231 			return false;
232 
233 		/* skb does not have min and max values */
234 		mkey->tp_min.src = filter->mkey.tp_min.src;
235 		mkey->tp_max.src = filter->mkey.tp_max.src;
236 	}
237 	return true;
238 }
239 
240 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
241 					 struct fl_flow_key *mkey)
242 {
243 	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
244 				      mask->filter_ht_params);
245 }
246 
247 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
248 					     struct fl_flow_key *mkey,
249 					     struct fl_flow_key *key)
250 {
251 	struct cls_fl_filter *filter, *f;
252 
253 	list_for_each_entry_rcu(filter, &mask->filters, list) {
254 		if (!fl_range_port_dst_cmp(filter, key, mkey))
255 			continue;
256 
257 		if (!fl_range_port_src_cmp(filter, key, mkey))
258 			continue;
259 
260 		f = __fl_lookup(mask, mkey);
261 		if (f)
262 			return f;
263 	}
264 	return NULL;
265 }
266 
267 static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
268 				       struct fl_flow_key *mkey,
269 				       struct fl_flow_key *key)
270 {
271 	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
272 		return fl_lookup_range(mask, mkey, key);
273 
274 	return __fl_lookup(mask, mkey);
275 }
276 
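/* Classification fast path: for each installed mask, dissect the skb
 * into a key, derive the masked key and look it up in that mask's hash
 * table. The first match that is not skip_sw wins and its actions are
 * executed; -1 means no filter matched. Illustrative example: a filter
 * such as "tc filter add dev eth0 ingress protocol ip flower ip_proto
 * tcp dst_port 80 action drop" contributes one mask (ip_proto/dst_port)
 * whose hash table this loop consults.
 */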
277 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
278 		       struct tcf_result *res)
279 {
280 	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
281 	struct cls_fl_filter *f;
282 	struct fl_flow_mask *mask;
283 	struct fl_flow_key skb_key;
284 	struct fl_flow_key skb_mkey;
285 
286 	list_for_each_entry_rcu(mask, &head->masks, list) {
287 		fl_clear_masked_range(&skb_key, mask);
288 
289 		skb_key.indev_ifindex = skb->skb_iif;
290 		/* skb_flow_dissect() does not set n_proto in case of an
291 		 * unknown protocol, so set it here instead.
292 		 */
293 		skb_key.basic.n_proto = skb->protocol;
294 		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
295 		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
296 
297 		fl_set_masked_key(&skb_mkey, &skb_key, mask);
298 
299 		f = fl_lookup(mask, &skb_mkey, &skb_key);
300 		if (f && !tc_skip_sw(f->flags)) {
301 			*res = f->res;
302 			return tcf_exts_exec(skb, &f->exts, res);
303 		}
304 	}
305 	return -1;
306 }
307 
308 static int fl_init(struct tcf_proto *tp)
309 {
310 	struct cls_fl_head *head;
311 
312 	head = kzalloc(sizeof(*head), GFP_KERNEL);
313 	if (!head)
314 		return -ENOBUFS;
315 
316 	spin_lock_init(&head->masks_lock);
317 	INIT_LIST_HEAD_RCU(&head->masks);
318 	rcu_assign_pointer(tp->root, head);
319 	idr_init(&head->handle_idr);
320 
321 	return rhashtable_init(&head->ht, &mask_ht_params);
322 }
323 
324 static void fl_mask_free(struct fl_flow_mask *mask)
325 {
326 	WARN_ON(!list_empty(&mask->filters));
327 	rhashtable_destroy(&mask->ht);
328 	kfree(mask);
329 }
330 
331 static void fl_mask_free_work(struct work_struct *work)
332 {
333 	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
334 						 struct fl_flow_mask, rwork);
335 
336 	fl_mask_free(mask);
337 }
338 
339 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
340 {
341 	if (!refcount_dec_and_test(&mask->refcnt))
342 		return false;
343 
344 	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
345 
346 	spin_lock(&head->masks_lock);
347 	list_del_rcu(&mask->list);
348 	spin_unlock(&head->masks_lock);
349 
350 	tcf_queue_work(&mask->rwork, fl_mask_free_work);
351 
352 	return true;
353 }
354 
355 static void __fl_destroy_filter(struct cls_fl_filter *f)
356 {
357 	tcf_exts_destroy(&f->exts);
358 	tcf_exts_put_net(&f->exts);
359 	kfree(f);
360 }
361 
362 static void fl_destroy_filter_work(struct work_struct *work)
363 {
364 	struct cls_fl_filter *f = container_of(to_rcu_work(work),
365 					struct cls_fl_filter, rwork);
366 
367 	__fl_destroy_filter(f);
368 }
369 
370 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
371 				 bool rtnl_held, struct netlink_ext_ack *extack)
372 {
373 	struct tc_cls_flower_offload cls_flower = {};
374 	struct tcf_block *block = tp->chain->block;
375 
376 	if (!rtnl_held)
377 		rtnl_lock();
378 
379 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
380 	cls_flower.command = TC_CLSFLOWER_DESTROY;
381 	cls_flower.cookie = (unsigned long) f;
382 
383 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
384 	spin_lock(&tp->lock);
385 	tcf_block_offload_dec(block, &f->flags);
386 	spin_unlock(&tp->lock);
387 
388 	if (!rtnl_held)
389 		rtnl_unlock();
390 }
391 
392 static int fl_hw_replace_filter(struct tcf_proto *tp,
393 				struct cls_fl_filter *f, bool rtnl_held,
394 				struct netlink_ext_ack *extack)
395 {
396 	struct tc_cls_flower_offload cls_flower = {};
397 	struct tcf_block *block = tp->chain->block;
398 	bool skip_sw = tc_skip_sw(f->flags);
399 	int err = 0;
400 
401 	if (!rtnl_held)
402 		rtnl_lock();
403 
404 	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
405 	if (!cls_flower.rule) {
406 		err = -ENOMEM;
407 		goto errout;
408 	}
409 
410 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
411 	cls_flower.command = TC_CLSFLOWER_REPLACE;
412 	cls_flower.cookie = (unsigned long) f;
413 	cls_flower.rule->match.dissector = &f->mask->dissector;
414 	cls_flower.rule->match.mask = &f->mask->key;
415 	cls_flower.rule->match.key = &f->mkey;
416 	cls_flower.classid = f->res.classid;
417 
418 	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
419 	if (err) {
420 		kfree(cls_flower.rule);
421 		if (skip_sw)
422 			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
423 		else
424 			err = 0;
425 		goto errout;
426 	}
427 
428 	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
429 	kfree(cls_flower.rule);
430 
431 	if (err < 0) {
432 		fl_hw_destroy_filter(tp, f, true, NULL);
433 		goto errout;
434 	} else if (err > 0) {
435 		f->in_hw_count = err;
436 		err = 0;
437 		spin_lock(&tp->lock);
438 		tcf_block_offload_inc(block, &f->flags);
439 		spin_unlock(&tp->lock);
440 	}
441 
442 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
443 		err = -EINVAL;
444 		goto errout;
445 	}
446 
447 errout:
448 	if (!rtnl_held)
449 		rtnl_unlock();
450 
451 	return err;
452 }
453 
454 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
455 			       bool rtnl_held)
456 {
457 	struct tc_cls_flower_offload cls_flower = {};
458 	struct tcf_block *block = tp->chain->block;
459 
460 	if (!rtnl_held)
461 		rtnl_lock();
462 
463 	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
464 	cls_flower.command = TC_CLSFLOWER_STATS;
465 	cls_flower.cookie = (unsigned long) f;
466 	cls_flower.classid = f->res.classid;
467 
468 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
469 
470 	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
471 			      cls_flower.stats.pkts,
472 			      cls_flower.stats.lastused);
473 
474 	if (!rtnl_held)
475 		rtnl_unlock();
476 }
477 
478 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
479 {
480 	/* The flower classifier only changes its root pointer during init and
481 	 * destroy. Users must obtain a reference to the tcf_proto instance
482 	 * before calling its API, so the tp->root pointer is protected from a
483 	 * concurrent call to fl_destroy() by reference counting.
484 	 */
485 	return rcu_dereference_raw(tp->root);
486 }
487 
488 static void __fl_put(struct cls_fl_filter *f)
489 {
490 	if (!refcount_dec_and_test(&f->refcnt))
491 		return;
492 
493 	WARN_ON(!f->deleted);
494 
495 	if (tcf_exts_get_net(&f->exts))
496 		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
497 	else
498 		__fl_destroy_filter(f);
499 }
500 
501 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
502 {
503 	struct cls_fl_filter *f;
504 
505 	rcu_read_lock();
506 	f = idr_find(&head->handle_idr, handle);
507 	if (f && !refcount_inc_not_zero(&f->refcnt))
508 		f = NULL;
509 	rcu_read_unlock();
510 
511 	return f;
512 }
513 
514 static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
515 						unsigned long *handle)
516 {
517 	struct cls_fl_head *head = fl_head_dereference(tp);
518 	struct cls_fl_filter *f;
519 
520 	rcu_read_lock();
521 	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
522 		/* don't return filters that are being deleted */
523 		if (refcount_inc_not_zero(&f->refcnt))
524 			break;
525 		++(*handle);
526 	}
527 	rcu_read_unlock();
528 
529 	return f;
530 }
531 
532 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
533 		       bool *last, bool rtnl_held,
534 		       struct netlink_ext_ack *extack)
535 {
536 	struct cls_fl_head *head = fl_head_dereference(tp);
537 
538 	*last = false;
539 
540 	spin_lock(&tp->lock);
541 	if (f->deleted) {
542 		spin_unlock(&tp->lock);
543 		return -ENOENT;
544 	}
545 
546 	f->deleted = true;
547 	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
548 			       f->mask->filter_ht_params);
549 	idr_remove(&head->handle_idr, f->handle);
550 	list_del_rcu(&f->list);
551 	spin_unlock(&tp->lock);
552 
553 	*last = fl_mask_put(head, f->mask);
554 	if (!tc_skip_hw(f->flags))
555 		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
556 	tcf_unbind_filter(tp, &f->res);
557 	__fl_put(f);
558 
559 	return 0;
560 }
561 
562 static void fl_destroy_sleepable(struct work_struct *work)
563 {
564 	struct cls_fl_head *head = container_of(to_rcu_work(work),
565 						struct cls_fl_head,
566 						rwork);
567 
568 	rhashtable_destroy(&head->ht);
569 	kfree(head);
570 	module_put(THIS_MODULE);
571 }
572 
573 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
574 		       struct netlink_ext_ack *extack)
575 {
576 	struct cls_fl_head *head = fl_head_dereference(tp);
577 	struct fl_flow_mask *mask, *next_mask;
578 	struct cls_fl_filter *f, *next;
579 	bool last;
580 
581 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
582 		list_for_each_entry_safe(f, next, &mask->filters, list) {
583 			__fl_delete(tp, f, &last, rtnl_held, extack);
584 			if (last)
585 				break;
586 		}
587 	}
588 	idr_destroy(&head->handle_idr);
589 
590 	__module_get(THIS_MODULE);
591 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
592 }
593 
594 static void fl_put(struct tcf_proto *tp, void *arg)
595 {
596 	struct cls_fl_filter *f = arg;
597 
598 	__fl_put(f);
599 }
600 
601 static void *fl_get(struct tcf_proto *tp, u32 handle)
602 {
603 	struct cls_fl_head *head = fl_head_dereference(tp);
604 
605 	return __fl_get(head, handle);
606 }
607 
608 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
609 	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
610 	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
611 	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
612 					    .len = IFNAMSIZ },
613 	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
614 	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
615 	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
616 	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
617 	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
618 	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
619 	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
620 	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
621 	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
622 	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
623 	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
624 	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
625 	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
626 	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
627 	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
628 	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
629 	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
630 	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
631 	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
632 	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
633 	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
634 	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
635 	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
636 	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
637 	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
638 	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
639 	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
640 	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
641 	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
642 	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
643 	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
644 	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
645 	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
646 	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
647 	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
648 	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
649 	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
650 	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
651 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
652 	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
653 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
654 	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
655 	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
656 	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
657 	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
658 	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
659 	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
660 	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
661 	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
662 	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
663 	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
664 	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
665 	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
666 	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
667 	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
668 	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
669 	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
670 	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
671 	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
672 	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
673 	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
674 	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
675 	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
676 	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
677 	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
678 	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
679 	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
680 	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
681 	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
682 	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
683 	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
684 	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
685 	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
686 	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
687 	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
688 	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
689 	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
690 	[TCA_FLOWER_KEY_ENC_IP_TTL]	 = { .type = NLA_U8 },
691 	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
692 	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
693 	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
694 };
695 
696 static const struct nla_policy
697 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
698 	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]        = { .type = NLA_NESTED },
699 };
700 
701 static const struct nla_policy
702 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
703 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]      = { .type = NLA_U16 },
704 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]       = { .type = NLA_U8 },
705 	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]       = { .type = NLA_BINARY,
706 						       .len = 128 },
707 };
708 
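/* Copy one netlink attribute into the key and its mask. Convention: if
 * the value attribute is absent, nothing is written; if it is present
 * but no mask attribute was supplied (or mask_type is TCA_FLOWER_UNSPEC),
 * an exact match is assumed and the mask is filled with 0xff bytes.
 */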
709 static void fl_set_key_val(struct nlattr **tb,
710 			   void *val, int val_type,
711 			   void *mask, int mask_type, int len)
712 {
713 	if (!tb[val_type])
714 		return;
715 	memcpy(val, nla_data(tb[val_type]), len);
716 	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
717 		memset(mask, 0xff, len);
718 	else
719 		memcpy(mask, nla_data(tb[mask_type]), len);
720 }
721 
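/* Parse the optional min/max port bounds. A range is only accepted when
 * it is well-formed: for each direction that has both bounds masked,
 * max must be strictly greater than min when compared as host-order
 * port numbers.
 */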
722 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
723 				 struct fl_flow_key *mask)
724 {
725 	fl_set_key_val(tb, &key->tp_min.dst,
726 		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
727 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
728 	fl_set_key_val(tb, &key->tp_max.dst,
729 		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
730 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
731 	fl_set_key_val(tb, &key->tp_min.src,
732 		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
733 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
734 	fl_set_key_val(tb, &key->tp_max.src,
735 		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
736 		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
737 
738 	if ((mask->tp_min.dst && mask->tp_max.dst &&
739 	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
740 	     (mask->tp_min.src && mask->tp_max.src &&
741 	      htons(key->tp_max.src) <= htons(key->tp_min.src)))
742 		return -EINVAL;
743 
744 	return 0;
745 }
746 
747 static int fl_set_key_mpls(struct nlattr **tb,
748 			   struct flow_dissector_key_mpls *key_val,
749 			   struct flow_dissector_key_mpls *key_mask)
750 {
751 	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
752 		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
753 		key_mask->mpls_ttl = MPLS_TTL_MASK;
754 	}
755 	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
756 		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);
757 
758 		if (bos & ~MPLS_BOS_MASK)
759 			return -EINVAL;
760 		key_val->mpls_bos = bos;
761 		key_mask->mpls_bos = MPLS_BOS_MASK;
762 	}
763 	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
764 		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);
765 
766 		if (tc & ~MPLS_TC_MASK)
767 			return -EINVAL;
768 		key_val->mpls_tc = tc;
769 		key_mask->mpls_tc = MPLS_TC_MASK;
770 	}
771 	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
772 		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);
773 
774 		if (label & ~MPLS_LABEL_MASK)
775 			return -EINVAL;
776 		key_val->mpls_label = label;
777 		key_mask->mpls_label = MPLS_LABEL_MASK;
778 	}
779 	return 0;
780 }
781 
782 static void fl_set_key_vlan(struct nlattr **tb,
783 			    __be16 ethertype,
784 			    int vlan_id_key, int vlan_prio_key,
785 			    struct flow_dissector_key_vlan *key_val,
786 			    struct flow_dissector_key_vlan *key_mask)
787 {
788 #define VLAN_PRIORITY_MASK	0x7
789 
790 	if (tb[vlan_id_key]) {
791 		key_val->vlan_id =
792 			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
793 		key_mask->vlan_id = VLAN_VID_MASK;
794 	}
795 	if (tb[vlan_prio_key]) {
796 		key_val->vlan_priority =
797 			nla_get_u8(tb[vlan_prio_key]) &
798 			VLAN_PRIORITY_MASK;
799 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
800 	}
801 	key_val->vlan_tpid = ethertype;
802 	key_mask->vlan_tpid = cpu_to_be16(~0);
803 }
804 
805 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
806 			    u32 *dissector_key, u32 *dissector_mask,
807 			    u32 flower_flag_bit, u32 dissector_flag_bit)
808 {
809 	if (flower_mask & flower_flag_bit) {
810 		*dissector_mask |= dissector_flag_bit;
811 		if (flower_key & flower_flag_bit)
812 			*dissector_key |= dissector_flag_bit;
813 	}
814 }
815 
816 static int fl_set_key_flags(struct nlattr **tb,
817 			    u32 *flags_key, u32 *flags_mask)
818 {
819 	u32 key, mask;
820 
821 	/* mask is mandatory for flags */
822 	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
823 		return -EINVAL;
824 
825 	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
826 	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));
827 
828 	*flags_key  = 0;
829 	*flags_mask = 0;
830 
831 	fl_set_key_flag(key, mask, flags_key, flags_mask,
832 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
833 	fl_set_key_flag(key, mask, flags_key, flags_mask,
834 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
835 			FLOW_DIS_FIRST_FRAG);
836 
837 	return 0;
838 }
839 
840 static void fl_set_key_ip(struct nlattr **tb, bool encap,
841 			  struct flow_dissector_key_ip *key,
842 			  struct flow_dissector_key_ip *mask)
843 {
844 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
845 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
846 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
847 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
848 
849 	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
850 	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
851 }
852 
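/* Parse a single geneve option TLV. The function serves both the key
 * and the mask: on the key pass option_len is 0 and the CLASS, TYPE and
 * DATA attributes are all mandatory; on the mask pass a depth of 0
 * means no mask was supplied, in which case the option stays pre-filled
 * with 0xff bytes, i.e. an exact match. Returns the number of bytes
 * consumed on success or a negative errno.
 */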
853 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
854 			     int depth, int option_len,
855 			     struct netlink_ext_ack *extack)
856 {
857 	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
858 	struct nlattr *class = NULL, *type = NULL, *data = NULL;
859 	struct geneve_opt *opt;
860 	int err, data_len = 0;
861 
862 	if (option_len > sizeof(struct geneve_opt))
863 		data_len = option_len - sizeof(struct geneve_opt);
864 
865 	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
866 	memset(opt, 0xff, option_len);
867 	opt->length = data_len / 4;
868 	opt->r1 = 0;
869 	opt->r2 = 0;
870 	opt->r3 = 0;
871 
872 	/* If no mask has been provided we assume an exact match. */
873 	if (!depth)
874 		return sizeof(struct geneve_opt) + data_len;
875 
876 	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
877 		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
878 		return -EINVAL;
879 	}
880 
881 	err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
882 			       nla, geneve_opt_policy, extack);
883 	if (err < 0)
884 		return err;
885 
886 	/* We are not allowed to omit any of CLASS, TYPE or DATA
887 	 * fields from the key.
888 	 */
889 	if (!option_len &&
890 	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
891 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
892 	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
893 		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
894 		return -EINVAL;
895 	}
896 
897 	/* Omitting any of CLASS, TYPE or DATA fields is allowed
898 	 * for the mask.
899 	 */
900 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
901 		int new_len = key->enc_opts.len;
902 
903 		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
904 		data_len = nla_len(data);
905 		if (data_len < 4) {
906 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
907 			return -ERANGE;
908 		}
909 		if (data_len % 4) {
910 			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
911 			return -ERANGE;
912 		}
913 
914 		new_len += sizeof(struct geneve_opt) + data_len;
915 		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
916 		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
917 			NL_SET_ERR_MSG(extack, "Tunnel options exceed max size");
918 			return -ERANGE;
919 		}
920 		opt->length = data_len / 4;
921 		memcpy(opt->opt_data, nla_data(data), data_len);
922 	}
923 
924 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
925 		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
926 		opt->opt_class = nla_get_be16(class);
927 	}
928 
929 	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
930 		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
931 		opt->type = nla_get_u8(type);
932 	}
933 
934 	return sizeof(struct geneve_opt) + data_len;
935 }
936 
937 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
938 			  struct fl_flow_key *mask,
939 			  struct netlink_ext_ack *extack)
940 {
941 	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
942 	int err, option_len, key_depth, msk_depth = 0;
943 
944 	err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
945 				  TCA_FLOWER_KEY_ENC_OPTS_MAX,
946 				  enc_opts_policy, extack);
947 	if (err)
948 		return err;
949 
950 	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
951 
952 	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
953 		err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
954 					  TCA_FLOWER_KEY_ENC_OPTS_MAX,
955 					  enc_opts_policy, extack);
956 		if (err)
957 			return err;
958 
959 		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
960 		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
961 	}
962 
963 	nla_for_each_attr(nla_opt_key, nla_enc_key,
964 			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
965 		switch (nla_type(nla_opt_key)) {
966 		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
967 			option_len = 0;
968 			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
969 			option_len = fl_set_geneve_opt(nla_opt_key, key,
970 						       key_depth, option_len,
971 						       extack);
972 			if (option_len < 0)
973 				return option_len;
974 
975 			key->enc_opts.len += option_len;
976 			/* At the same time, parse through the mask in order to
977 			 * verify that the exact and mask attribute lengths match.
978 			 */
979 			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
980 			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
981 						       msk_depth, option_len,
982 						       extack);
983 			if (option_len < 0)
984 				return option_len;
985 
986 			mask->enc_opts.len += option_len;
987 			if (key->enc_opts.len != mask->enc_opts.len) {
988 				NL_SET_ERR_MSG(extack, "Key and mask are misaligned");
989 				return -EINVAL;
990 			}
991 
992 			if (msk_depth)
993 				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
994 			break;
995 		default:
996 			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
997 			return -EINVAL;
998 		}
999 	}
1000 
1001 	return 0;
1002 }
1003 
1004 static int fl_set_key(struct net *net, struct nlattr **tb,
1005 		      struct fl_flow_key *key, struct fl_flow_key *mask,
1006 		      struct netlink_ext_ack *extack)
1007 {
1008 	__be16 ethertype;
1009 	int ret = 0;
1010 #ifdef CONFIG_NET_CLS_IND
1011 	if (tb[TCA_FLOWER_INDEV]) {
1012 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1013 		if (err < 0)
1014 			return err;
1015 		key->indev_ifindex = err;
1016 		mask->indev_ifindex = 0xffffffff;
1017 	}
1018 #endif
1019 
1020 	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1021 		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1022 		       sizeof(key->eth.dst));
1023 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1024 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1025 		       sizeof(key->eth.src));
1026 
1027 	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
1028 		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
1029 
1030 		if (eth_type_vlan(ethertype)) {
1031 			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1032 					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
1033 					&mask->vlan);
1034 
1035 			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
1036 				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
1037 				if (eth_type_vlan(ethertype)) {
1038 					fl_set_key_vlan(tb, ethertype,
1039 							TCA_FLOWER_KEY_CVLAN_ID,
1040 							TCA_FLOWER_KEY_CVLAN_PRIO,
1041 							&key->cvlan, &mask->cvlan);
1042 					fl_set_key_val(tb, &key->basic.n_proto,
1043 						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1044 						       &mask->basic.n_proto,
1045 						       TCA_FLOWER_UNSPEC,
1046 						       sizeof(key->basic.n_proto));
1047 				} else {
1048 					key->basic.n_proto = ethertype;
1049 					mask->basic.n_proto = cpu_to_be16(~0);
1050 				}
1051 			}
1052 		} else {
1053 			key->basic.n_proto = ethertype;
1054 			mask->basic.n_proto = cpu_to_be16(~0);
1055 		}
1056 	}
1057 
1058 	if (key->basic.n_proto == htons(ETH_P_IP) ||
1059 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
1060 		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1061 			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1062 			       sizeof(key->basic.ip_proto));
1063 		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1064 	}
1065 
1066 	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1067 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1068 		mask->control.addr_type = ~0;
1069 		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1070 			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1071 			       sizeof(key->ipv4.src));
1072 		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1073 			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1074 			       sizeof(key->ipv4.dst));
1075 	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1076 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1077 		mask->control.addr_type = ~0;
1078 		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1079 			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1080 			       sizeof(key->ipv6.src));
1081 		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1082 			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1083 			       sizeof(key->ipv6.dst));
1084 	}
1085 
1086 	if (key->basic.ip_proto == IPPROTO_TCP) {
1087 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1088 			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1089 			       sizeof(key->tp.src));
1090 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1091 			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1092 			       sizeof(key->tp.dst));
1093 		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1094 			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1095 			       sizeof(key->tcp.flags));
1096 	} else if (key->basic.ip_proto == IPPROTO_UDP) {
1097 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1098 			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1099 			       sizeof(key->tp.src));
1100 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1101 			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1102 			       sizeof(key->tp.dst));
1103 	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
1104 		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1105 			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1106 			       sizeof(key->tp.src));
1107 		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1108 			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1109 			       sizeof(key->tp.dst));
1110 	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
1111 		   key->basic.ip_proto == IPPROTO_ICMP) {
1112 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1113 			       &mask->icmp.type,
1114 			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1115 			       sizeof(key->icmp.type));
1116 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1117 			       &mask->icmp.code,
1118 			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1119 			       sizeof(key->icmp.code));
1120 	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1121 		   key->basic.ip_proto == IPPROTO_ICMPV6) {
1122 		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1123 			       &mask->icmp.type,
1124 			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1125 			       sizeof(key->icmp.type));
1126 		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1127 			       &mask->icmp.code,
1128 			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1129 			       sizeof(key->icmp.code));
1130 	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1131 		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1132 		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
1133 		if (ret)
1134 			return ret;
1135 	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1136 		   key->basic.n_proto == htons(ETH_P_RARP)) {
1137 		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1138 			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1139 			       sizeof(key->arp.sip));
1140 		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1141 			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1142 			       sizeof(key->arp.tip));
1143 		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1144 			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1145 			       sizeof(key->arp.op));
1146 		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1147 			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1148 			       sizeof(key->arp.sha));
1149 		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1150 			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1151 			       sizeof(key->arp.tha));
1152 	}
1153 
1154 	if (key->basic.ip_proto == IPPROTO_TCP ||
1155 	    key->basic.ip_proto == IPPROTO_UDP ||
1156 	    key->basic.ip_proto == IPPROTO_SCTP) {
1157 		ret = fl_set_key_port_range(tb, key, mask);
1158 		if (ret)
1159 			return ret;
1160 	}
1161 
1162 	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1163 	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1164 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1165 		mask->enc_control.addr_type = ~0;
1166 		fl_set_key_val(tb, &key->enc_ipv4.src,
1167 			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
1168 			       &mask->enc_ipv4.src,
1169 			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1170 			       sizeof(key->enc_ipv4.src));
1171 		fl_set_key_val(tb, &key->enc_ipv4.dst,
1172 			       TCA_FLOWER_KEY_ENC_IPV4_DST,
1173 			       &mask->enc_ipv4.dst,
1174 			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1175 			       sizeof(key->enc_ipv4.dst));
1176 	}
1177 
1178 	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1179 	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1180 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1181 		mask->enc_control.addr_type = ~0;
1182 		fl_set_key_val(tb, &key->enc_ipv6.src,
1183 			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
1184 			       &mask->enc_ipv6.src,
1185 			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1186 			       sizeof(key->enc_ipv6.src));
1187 		fl_set_key_val(tb, &key->enc_ipv6.dst,
1188 			       TCA_FLOWER_KEY_ENC_IPV6_DST,
1189 			       &mask->enc_ipv6.dst,
1190 			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1191 			       sizeof(key->enc_ipv6.dst));
1192 	}
1193 
1194 	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1195 		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1196 		       sizeof(key->enc_key_id.keyid));
1197 
1198 	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1199 		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1200 		       sizeof(key->enc_tp.src));
1201 
1202 	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1203 		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1204 		       sizeof(key->enc_tp.dst));
1205 
1206 	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1207 
1208 	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1209 		ret = fl_set_enc_opt(tb, key, mask, extack);
1210 		if (ret)
1211 			return ret;
1212 	}
1213 
1214 	if (tb[TCA_FLOWER_KEY_FLAGS])
1215 		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);
1216 
1217 	return ret;
1218 }
1219 
1220 static void fl_mask_copy(struct fl_flow_mask *dst,
1221 			 struct fl_flow_mask *src)
1222 {
1223 	const void *psrc = fl_key_get_start(&src->key, src);
1224 	void *pdst = fl_key_get_start(&dst->key, src);
1225 
1226 	memcpy(pdst, psrc, fl_mask_range(src));
1227 	dst->range = src->range;
1228 }
1229 
1230 static const struct rhashtable_params fl_ht_params = {
1231 	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
1232 	.head_offset = offsetof(struct cls_fl_filter, ht_node),
1233 	.automatic_shrinking = true,
1234 };
1235 
1236 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
1237 {
1238 	mask->filter_ht_params = fl_ht_params;
1239 	mask->filter_ht_params.key_len = fl_mask_range(mask);
1240 	mask->filter_ht_params.key_offset += mask->range.start;
1241 
1242 	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
1243 }
1244 
1245 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
1246 #define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)
1247 
1248 #define FL_KEY_IS_MASKED(mask, member)						\
1249 	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
1250 		   0, FL_KEY_MEMBER_SIZE(member))				\
1251 
1252 #define FL_KEY_SET(keys, cnt, id, member)					\
1253 	do {									\
1254 		keys[cnt].key_id = id;						\
1255 		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
1256 		cnt++;								\
1257 	} while (0)
1258 
1259 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
1260 	do {									\
1261 		if (FL_KEY_IS_MASKED(mask, member))				\
1262 			FL_KEY_SET(keys, cnt, id, member);			\
1263 	} while (0)
1264 
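/* Program the per-mask flow dissector with only the keys this mask
 * actually uses (control and basic are always included); fields whose
 * mask bytes are all zero are skipped, so packet dissection does no
 * more work than the installed filters require.
 */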
1265 static void fl_init_dissector(struct flow_dissector *dissector,
1266 			      struct fl_flow_key *mask)
1267 {
1268 	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
1269 	size_t cnt = 0;
1270 
1271 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
1272 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
1273 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1274 			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
1275 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1276 			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
1277 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1278 			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
1279 	if (FL_KEY_IS_MASKED(mask, tp) ||
1280 	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
1281 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
1282 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1283 			     FLOW_DISSECTOR_KEY_IP, ip);
1284 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1285 			     FLOW_DISSECTOR_KEY_TCP, tcp);
1286 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1287 			     FLOW_DISSECTOR_KEY_ICMP, icmp);
1288 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1289 			     FLOW_DISSECTOR_KEY_ARP, arp);
1290 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1291 			     FLOW_DISSECTOR_KEY_MPLS, mpls);
1292 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1293 			     FLOW_DISSECTOR_KEY_VLAN, vlan);
1294 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1295 			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
1296 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1297 			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
1298 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1299 			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
1300 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1301 			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
1302 	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
1303 	    FL_KEY_IS_MASKED(mask, enc_ipv6))
1304 		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1305 			   enc_control);
1306 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1307 			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
1308 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1309 			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
1310 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
1311 			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
1312 
1313 	skb_flow_dissector_init(dissector, keys, cnt);
1314 }
1315 
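/* Promote the temporary mask that fl_check_assign_mask() inserted into
 * the masks hash table to a heap-allocated, refcounted copy: set up its
 * filter hash table and dissector, atomically replace the temporary
 * node, then wait an RCU grace period before publishing the new mask on
 * head->masks.
 */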
1316 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
1317 					       struct fl_flow_mask *mask)
1318 {
1319 	struct fl_flow_mask *newmask;
1320 	int err;
1321 
1322 	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
1323 	if (!newmask)
1324 		return ERR_PTR(-ENOMEM);
1325 
1326 	fl_mask_copy(newmask, mask);
1327 
1328 	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
1329 	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
1330 		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
1331 
1332 	err = fl_init_mask_hashtable(newmask);
1333 	if (err)
1334 		goto errout_free;
1335 
1336 	fl_init_dissector(&newmask->dissector, &newmask->key);
1337 
1338 	INIT_LIST_HEAD_RCU(&newmask->filters);
1339 
1340 	refcount_set(&newmask->refcnt, 1);
1341 	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
1342 				      &newmask->ht_node, mask_ht_params);
1343 	if (err)
1344 		goto errout_destroy;
1345 
1346 	/* Wait until any potential concurrent users of mask are finished */
1347 	synchronize_rcu();
1348 
1349 	spin_lock(&head->masks_lock);
1350 	list_add_tail_rcu(&newmask->list, &head->masks);
1351 	spin_unlock(&head->masks_lock);
1352 
1353 	return newmask;
1354 
1355 errout_destroy:
1356 	rhashtable_destroy(&newmask->ht);
1357 errout_free:
1358 	kfree(newmask);
1359 
1360 	return ERR_PTR(err);
1361 }
1362 
1363 static int fl_check_assign_mask(struct cls_fl_head *head,
1364 				struct cls_fl_filter *fnew,
1365 				struct cls_fl_filter *fold,
1366 				struct fl_flow_mask *mask)
1367 {
1368 	struct fl_flow_mask *newmask;
1369 	int ret = 0;
1370 
1371 	rcu_read_lock();
1372 
1373 	/* Insert the mask as a temporary node to prevent concurrent creation of
1374 	 * a mask with the same key. Any concurrent lookup with the same key will
1375 	 * return -EAGAIN because the mask's refcnt is zero. It is safe to insert
1376 	 * the stack-allocated 'mask' into the masks hash table because we call
1377 	 * synchronize_rcu() before returning from this function (either in case
1378 	 * of error or after replacing it with the heap-allocated mask in
1379 	 * fl_create_new_mask()).
1380 	 */
1381 	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
1382 						       &mask->ht_node,
1383 						       mask_ht_params);
1384 	if (!fnew->mask) {
1385 		rcu_read_unlock();
1386 
1387 		if (fold) {
1388 			ret = -EINVAL;
1389 			goto errout_cleanup;
1390 		}
1391 
1392 		newmask = fl_create_new_mask(head, mask);
1393 		if (IS_ERR(newmask)) {
1394 			ret = PTR_ERR(newmask);
1395 			goto errout_cleanup;
1396 		}
1397 
1398 		fnew->mask = newmask;
1399 		return 0;
1400 	} else if (IS_ERR(fnew->mask)) {
1401 		ret = PTR_ERR(fnew->mask);
1402 	} else if (fold && fold->mask != fnew->mask) {
1403 		ret = -EINVAL;
1404 	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
1405 		/* Mask was deleted concurrently, try again */
1406 		ret = -EAGAIN;
1407 	}
1408 	rcu_read_unlock();
1409 	return ret;
1410 
1411 errout_cleanup:
1412 	rhashtable_remove_fast(&head->ht, &mask->ht_node,
1413 			       mask_ht_params);
1414 	/* Wait until any potential concurrent users of mask are finished */
1415 	synchronize_rcu();
1416 	return ret;
1417 }
1418 
1419 static int fl_set_parms(struct net *net, struct tcf_proto *tp,
1420 			struct cls_fl_filter *f, struct fl_flow_mask *mask,
1421 			unsigned long base, struct nlattr **tb,
1422 			struct nlattr *est, bool ovr,
1423 			struct fl_flow_tmplt *tmplt, bool rtnl_held,
1424 			struct netlink_ext_ack *extack)
1425 {
1426 	int err;
1427 
1428 	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
1429 				extack);
1430 	if (err < 0)
1431 		return err;
1432 
1433 	if (tb[TCA_FLOWER_CLASSID]) {
1434 		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
1435 		if (!rtnl_held)
1436 			rtnl_lock();
1437 		tcf_bind_filter(tp, &f->res, base);
1438 		if (!rtnl_held)
1439 			rtnl_unlock();
1440 	}
1441 
1442 	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
1443 	if (err)
1444 		return err;
1445 
1446 	fl_mask_update_range(mask);
1447 	fl_set_masked_key(&f->mkey, &f->key, mask);
1448 
1449 	if (!fl_mask_fits_tmplt(tmplt, mask)) {
1450 		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
1451 		return -EINVAL;
1452 	}
1453 
1454 	return 0;
1455 }
1456 
1457 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
1458 			       struct cls_fl_filter *fold,
1459 			       bool *in_ht)
1460 {
1461 	struct fl_flow_mask *mask = fnew->mask;
1462 	int err;
1463 
1464 	err = rhashtable_lookup_insert_fast(&mask->ht,
1465 					    &fnew->ht_node,
1466 					    mask->filter_ht_params);
1467 	if (err) {
1468 		*in_ht = false;
1469 		/* It is okay if a filter with the same key already exists
1470 		 * when overwriting.
1471 		 */
1472 		return fold && err == -EEXIST ? 0 : err;
1473 	}
1474 
1475 	*in_ht = true;
1476 	return 0;
1477 }
1478 
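/* Create or update a filter. The unlocked sequence is: parse the
 * attributes and build fnew, find or create its shared mask, insert
 * fnew into the mask's hash table, offload to hardware unless skip_hw,
 * and only then commit under tp->lock. Concurrent deletion of tp or of
 * the old filter is detected at commit time and reported as -EAGAIN so
 * that the caller retries.
 */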
1479 static int fl_change(struct net *net, struct sk_buff *in_skb,
1480 		     struct tcf_proto *tp, unsigned long base,
1481 		     u32 handle, struct nlattr **tca,
1482 		     void **arg, bool ovr, bool rtnl_held,
1483 		     struct netlink_ext_ack *extack)
1484 {
1485 	struct cls_fl_head *head = fl_head_dereference(tp);
1486 	struct cls_fl_filter *fold = *arg;
1487 	struct cls_fl_filter *fnew;
1488 	struct fl_flow_mask *mask;
1489 	struct nlattr **tb;
1490 	bool in_ht;
1491 	int err;
1492 
1493 	if (!tca[TCA_OPTIONS]) {
1494 		err = -EINVAL;
1495 		goto errout_fold;
1496 	}
1497 
1498 	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
1499 	if (!mask) {
1500 		err = -ENOBUFS;
1501 		goto errout_fold;
1502 	}
1503 
1504 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1505 	if (!tb) {
1506 		err = -ENOBUFS;
1507 		goto errout_mask_alloc;
1508 	}
1509 
1510 	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1511 			       fl_policy, NULL);
1512 	if (err < 0)
1513 		goto errout_tb;
1514 
1515 	if (fold && handle && fold->handle != handle) {
1516 		err = -EINVAL;
1517 		goto errout_tb;
1518 	}
1519 
1520 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
1521 	if (!fnew) {
1522 		err = -ENOBUFS;
1523 		goto errout_tb;
1524 	}
1525 	refcount_set(&fnew->refcnt, 1);
1526 
1527 	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
1528 	if (err < 0)
1529 		goto errout;
1530 
1531 	if (tb[TCA_FLOWER_FLAGS]) {
1532 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
1533 
1534 		if (!tc_flags_valid(fnew->flags)) {
1535 			err = -EINVAL;
1536 			goto errout;
1537 		}
1538 	}
1539 
1540 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
1541 			   tp->chain->tmplt_priv, rtnl_held, extack);
1542 	if (err)
1543 		goto errout;
1544 
1545 	err = fl_check_assign_mask(head, fnew, fold, mask);
1546 	if (err)
1547 		goto errout;
1548 
1549 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
1550 	if (err)
1551 		goto errout_mask;
1552 
1553 	if (!tc_skip_hw(fnew->flags)) {
1554 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
1555 		if (err)
1556 			goto errout_ht;
1557 	}
1558 
1559 	if (!tc_in_hw(fnew->flags))
1560 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
1561 
1562 	spin_lock(&tp->lock);
1563 
1564 	/* tp was deleted concurrently. -EAGAIN will cause the caller to look up
1565 	 * the proto again or create a new one, if necessary.
1566 	 */
1567 	if (tp->deleting) {
1568 		err = -EAGAIN;
1569 		goto errout_hw;
1570 	}
1571 
1572 	refcount_inc(&fnew->refcnt);
1573 	if (fold) {
1574 		/* Fold filter was deleted concurrently. Retry lookup. */
1575 		if (fold->deleted) {
1576 			err = -EAGAIN;
1577 			goto errout_hw;
1578 		}
1579 
1580 		fnew->handle = handle;
1581 
1582 		if (!in_ht) {
1583 			struct rhashtable_params params =
1584 				fnew->mask->filter_ht_params;
1585 
1586 			err = rhashtable_insert_fast(&fnew->mask->ht,
1587 						     &fnew->ht_node,
1588 						     params);
1589 			if (err)
1590 				goto errout_hw;
1591 			in_ht = true;
1592 		}
1593 
1594 		rhashtable_remove_fast(&fold->mask->ht,
1595 				       &fold->ht_node,
1596 				       fold->mask->filter_ht_params);
1597 		idr_replace(&head->handle_idr, fnew, fnew->handle);
1598 		list_replace_rcu(&fold->list, &fnew->list);
1599 		fold->deleted = true;
1600 
1601 		spin_unlock(&tp->lock);
1602 
1603 		fl_mask_put(head, fold->mask);
1604 		if (!tc_skip_hw(fold->flags))
1605 			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
1606 		tcf_unbind_filter(tp, &fold->res);
1607 		/* Caller holds reference to fold, so refcnt is always > 0
1608 		 * after this.
1609 		 */
1610 		refcount_dec(&fold->refcnt);
1611 		__fl_put(fold);
1612 	} else {
1613 		if (handle) {
1614 			/* The user specified a handle that should not exist yet. */
1615 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1616 					    handle, GFP_ATOMIC);
1617 
1618 			/* A filter with the specified handle was concurrently
1619 			 * inserted after the initial check in cls_api. This is
1620 			 * not necessarily an error if NLM_F_EXCL is not set in
1621 			 * the message flags. Returning EAGAIN will cause cls_api
1622 			 * to try to update the concurrently inserted rule.
1623 			 */
1624 			if (err == -ENOSPC)
1625 				err = -EAGAIN;
1626 		} else {
1627 			handle = 1;
1628 			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
1629 					    INT_MAX, GFP_ATOMIC);
1630 		}
1631 		if (err)
1632 			goto errout_hw;
1633 
1634 		fnew->handle = handle;
1635 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
1636 		spin_unlock(&tp->lock);
1637 	}
1638 
1639 	*arg = fnew;
1640 
1641 	kfree(tb);
1642 	kfree(mask);
1643 	return 0;
1644 
1645 errout_hw:
1646 	spin_unlock(&tp->lock);
1647 	if (!tc_skip_hw(fnew->flags))
1648 		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
1649 errout_ht:
1650 	if (in_ht)
1651 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
1652 				       fnew->mask->filter_ht_params);
1653 errout_mask:
1654 	fl_mask_put(head, fnew->mask);
1655 errout:
1656 	tcf_exts_get_net(&fnew->exts);
1657 	tcf_queue_work(&fnew->rwork, fl_destroy_filter_work);
1658 errout_tb:
1659 	kfree(tb);
1660 errout_mask_alloc:
1661 	kfree(mask);
1662 errout_fold:
1663 	if (fold)
1664 		__fl_put(fold);
1665 	return err;
1666 }
1667 
1668 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
1669 		     bool rtnl_held, struct netlink_ext_ack *extack)
1670 {
1671 	struct cls_fl_head *head = fl_head_dereference(tp);
1672 	struct cls_fl_filter *f = arg;
1673 	bool last_on_mask;
1674 	int err = 0;
1675 
1676 	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
1677 	*last = list_empty(&head->masks);
1678 	__fl_put(f);
1679 
1680 	return err;
1681 }
1682 
1683 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
1684 		    bool rtnl_held)
1685 {
1686 	struct cls_fl_filter *f;
1687 
1688 	arg->count = arg->skip;
1689 
1690 	while ((f = fl_get_next_filter(tp, &arg->cookie)) != NULL) {
1691 		if (arg->fn(tp, f, arg) < 0) {
1692 			__fl_put(f);
1693 			arg->stop = 1;
1694 			break;
1695 		}
1696 		__fl_put(f);
1697 		arg->cookie++;
1698 		arg->count++;
1699 	}
1700 }
1701 
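/* Replay all offloadable filters to a single driver callback, e.g. when a
 * block is (un)bound. For skip_sw filters a callback failure is fatal;
 * otherwise the filter is skipped and its in_hw_count is left untouched.
 */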
1702 static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
1703 			void *cb_priv, struct netlink_ext_ack *extack)
1704 {
1705 	struct tc_cls_flower_offload cls_flower = {};
1706 	struct tcf_block *block = tp->chain->block;
1707 	unsigned long handle = 0;
1708 	struct cls_fl_filter *f;
1709 	int err;
1710 
1711 	while ((f = fl_get_next_filter(tp, &handle))) {
1712 		if (tc_skip_hw(f->flags))
1713 			goto next_flow;
1714 
1715 		cls_flower.rule =
1716 			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
1717 		if (!cls_flower.rule) {
1718 			__fl_put(f);
1719 			return -ENOMEM;
1720 		}
1721 
1722 		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
1723 					   extack);
1724 		cls_flower.command = add ?
1725 			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
1726 		cls_flower.cookie = (unsigned long)f;
1727 		cls_flower.rule->match.dissector = &f->mask->dissector;
1728 		cls_flower.rule->match.mask = &f->mask->key;
1729 		cls_flower.rule->match.key = &f->mkey;
1730 
1731 		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
1732 		if (err) {
1733 			kfree(cls_flower.rule);
1734 			if (tc_skip_sw(f->flags)) {
1735 				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
1736 				__fl_put(f);
1737 				return err;
1738 			}
1739 			goto next_flow;
1740 		}
1741 
1742 		cls_flower.classid = f->res.classid;
1743 
1744 		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
1745 		kfree(cls_flower.rule);
1746 
1747 		if (err) {
1748 			if (add && tc_skip_sw(f->flags)) {
1749 				__fl_put(f);
1750 				return err;
1751 			}
1752 			goto next_flow;
1753 		}
1754 
1755 		spin_lock(&tp->lock);
1756 		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
1757 					  add);
1758 		spin_unlock(&tp->lock);
1759 next_flow:
1760 		handle++;
1761 		__fl_put(f);
1762 	}
1763 
1764 	return 0;
1765 }
1766 
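/* Announce a new template to the drivers bound to this block so they can
 * prepare for filters matching on the advertised mask.
 */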
1767 static int fl_hw_create_tmplt(struct tcf_chain *chain,
1768 			      struct fl_flow_tmplt *tmplt)
1769 {
1770 	struct tc_cls_flower_offload cls_flower = {};
1771 	struct tcf_block *block = chain->block;
1772 
1773 	cls_flower.rule = flow_rule_alloc(0);
1774 	if (!cls_flower.rule)
1775 		return -ENOMEM;
1776 
1777 	cls_flower.common.chain_index = chain->index;
1778 	cls_flower.command = TC_CLSFLOWER_TMPLT_CREATE;
1779 	cls_flower.cookie = (unsigned long) tmplt;
1780 	cls_flower.rule->match.dissector = &tmplt->dissector;
1781 	cls_flower.rule->match.mask = &tmplt->mask;
1782 	cls_flower.rule->match.key = &tmplt->dummy_key;
1783 
1784 	/* We don't care if any of the drivers fails to handle this
1785 	 * call. It serves merely as a hint to them.
1786 	 */
1787 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1788 	kfree(cls_flower.rule);
1789 
1790 	return 0;
1791 }
1792 
1793 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
1794 				struct fl_flow_tmplt *tmplt)
1795 {
1796 	struct tc_cls_flower_offload cls_flower = {};
1797 	struct tcf_block *block = chain->block;
1798 
1799 	cls_flower.common.chain_index = chain->index;
1800 	cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
1801 	cls_flower.cookie = (unsigned long) tmplt;
1802 
1803 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
1804 }
1805 
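/* Parse TCA_OPTIONS into a chain template: a key/mask pair without
 * actions that describes which fields later filters on this chain will
 * match on.
 */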
1806 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
1807 			     struct nlattr **tca,
1808 			     struct netlink_ext_ack *extack)
1809 {
1810 	struct fl_flow_tmplt *tmplt;
1811 	struct nlattr **tb;
1812 	int err;
1813 
1814 	if (!tca[TCA_OPTIONS])
1815 		return ERR_PTR(-EINVAL);
1816 
1817 	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
1818 	if (!tb)
1819 		return ERR_PTR(-ENOBUFS);
1820 	err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS],
1821 			       fl_policy, NULL);
1822 	if (err)
1823 		goto errout_tb;
1824 
1825 	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
1826 	if (!tmplt) {
1827 		err = -ENOMEM;
1828 		goto errout_tb;
1829 	}
1830 	tmplt->chain = chain;
1831 	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
1832 	if (err)
1833 		goto errout_tmplt;
1834 
1835 	fl_init_dissector(&tmplt->dissector, &tmplt->mask);
1836 
1837 	err = fl_hw_create_tmplt(chain, tmplt);
1838 	if (err)
1839 		goto errout_tmplt;
1840 
1841 	kfree(tb);
1842 	return tmplt;
1843 
1844 errout_tmplt:
1845 	kfree(tmplt);
1846 errout_tb:
1847 	kfree(tb);
1848 	return ERR_PTR(err);
1849 }
1850 
1851 static void fl_tmplt_destroy(void *tmplt_priv)
1852 {
1853 	struct fl_flow_tmplt *tmplt = tmplt_priv;
1854 
1855 	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
1856 	kfree(tmplt);
1857 }
1858 
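/* Emit a value attribute and, unless mask_type is TCA_FLOWER_UNSPEC, the
 * corresponding mask attribute. An all-zero mask means the field is
 * unused, so nothing is dumped.
 */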
1859 static int fl_dump_key_val(struct sk_buff *skb,
1860 			   void *val, int val_type,
1861 			   void *mask, int mask_type, int len)
1862 {
1863 	int err;
1864 
1865 	if (!memchr_inv(mask, 0, len))
1866 		return 0;
1867 	err = nla_put(skb, val_type, len, val);
1868 	if (err)
1869 		return err;
1870 	if (mask_type != TCA_FLOWER_UNSPEC) {
1871 		err = nla_put(skb, mask_type, len, mask);
1872 		if (err)
1873 			return err;
1874 	}
1875 	return 0;
1876 }
1877 
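/* Dump the source/destination port range bounds; the min/max values have
 * no separate mask attributes.
 */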
1878 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
1879 				  struct fl_flow_key *mask)
1880 {
1881 	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
1882 			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
1883 			    sizeof(key->tp_min.dst)) ||
1884 	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
1885 			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
1886 			    sizeof(key->tp_max.dst)) ||
1887 	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
1888 			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
1889 			    sizeof(key->tp_min.src)) ||
1890 	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
1891 			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
1892 			    sizeof(key->tp_max.src)))
1893 		return -1;
1894 
1895 	return 0;
1896 }
1897 
1898 static int fl_dump_key_mpls(struct sk_buff *skb,
1899 			    struct flow_dissector_key_mpls *mpls_key,
1900 			    struct flow_dissector_key_mpls *mpls_mask)
1901 {
1902 	int err;
1903 
1904 	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
1905 		return 0;
1906 	if (mpls_mask->mpls_ttl) {
1907 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
1908 				 mpls_key->mpls_ttl);
1909 		if (err)
1910 			return err;
1911 	}
1912 	if (mpls_mask->mpls_tc) {
1913 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
1914 				 mpls_key->mpls_tc);
1915 		if (err)
1916 			return err;
1917 	}
1918 	if (mpls_mask->mpls_label) {
1919 		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
1920 				  mpls_key->mpls_label);
1921 		if (err)
1922 			return err;
1923 	}
1924 	if (mpls_mask->mpls_bos) {
1925 		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
1926 				 mpls_key->mpls_bos);
1927 		if (err)
1928 			return err;
1929 	}
1930 	return 0;
1931 }
1932 
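/* Dump TOS and TTL, using the encapsulation (tunnel) attributes when
 * encap is true and the plain IP attributes otherwise.
 */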
1933 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
1934 			  struct flow_dissector_key_ip *key,
1935 			  struct flow_dissector_key_ip *mask)
1936 {
1937 	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
1938 	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
1939 	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
1940 	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
1941 
1942 	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
1943 	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
1944 		return -1;
1945 
1946 	return 0;
1947 }
1948 
1949 static int fl_dump_key_vlan(struct sk_buff *skb,
1950 			    int vlan_id_key, int vlan_prio_key,
1951 			    struct flow_dissector_key_vlan *vlan_key,
1952 			    struct flow_dissector_key_vlan *vlan_mask)
1953 {
1954 	int err;
1955 
1956 	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
1957 		return 0;
1958 	if (vlan_mask->vlan_id) {
1959 		err = nla_put_u16(skb, vlan_id_key,
1960 				  vlan_key->vlan_id);
1961 		if (err)
1962 			return err;
1963 	}
1964 	if (vlan_mask->vlan_priority) {
1965 		err = nla_put_u8(skb, vlan_prio_key,
1966 				 vlan_key->vlan_priority);
1967 		if (err)
1968 			return err;
1969 	}
1970 	return 0;
1971 }
1972 
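/* Copy one flag bit from the flow dissector layout to the
 * TCA_FLOWER_KEY_FLAGS layout, for both key and mask.
 */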
1973 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
1974 			    u32 *flower_key, u32 *flower_mask,
1975 			    u32 flower_flag_bit, u32 dissector_flag_bit)
1976 {
1977 	if (dissector_mask & dissector_flag_bit) {
1978 		*flower_mask |= flower_flag_bit;
1979 		if (dissector_key & dissector_flag_bit)
1980 			*flower_key |= flower_flag_bit;
1981 	}
1982 }
1983 
1984 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
1985 {
1986 	u32 key, mask;
1987 	__be32 _key, _mask;
1988 	int err;
1989 
1990 	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
1991 		return 0;
1992 
1993 	key = 0;
1994 	mask = 0;
1995 
1996 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1997 			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
1998 	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
1999 			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2000 			FLOW_DIS_FIRST_FRAG);
2001 
2002 	_key = cpu_to_be32(key);
2003 	_mask = cpu_to_be32(mask);
2004 
2005 	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, sizeof(_key), &_key);
2006 	if (err)
2007 		return err;
2008 
2009 	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, sizeof(_mask), &_mask);
2010 }
2011 
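/* Walk the packed array of geneve options, emitting a class/type/data
 * attribute triple per option. opt->length counts 4-byte words.
 */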
2012 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
2013 				  struct flow_dissector_key_enc_opts *enc_opts)
2014 {
2015 	struct geneve_opt *opt;
2016 	struct nlattr *nest;
2017 	int opt_off = 0;
2018 
2019 	nest = nla_nest_start(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
2020 	if (!nest)
2021 		goto nla_put_failure;
2022 
2023 	while (enc_opts->len > opt_off) {
2024 		opt = (struct geneve_opt *)&enc_opts->data[opt_off];
2025 
2026 		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
2027 				 opt->opt_class))
2028 			goto nla_put_failure;
2029 		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
2030 			       opt->type))
2031 			goto nla_put_failure;
2032 		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
2033 			    opt->length * 4, opt->opt_data))
2034 			goto nla_put_failure;
2035 
2036 		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
2037 	}
2038 	nla_nest_end(skb, nest);
2039 	return 0;
2040 
2041 nla_put_failure:
2042 	nla_nest_cancel(skb, nest);
2043 	return -EMSGSIZE;
2044 }
2045 
2046 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
2047 			       struct flow_dissector_key_enc_opts *enc_opts)
2048 {
2049 	struct nlattr *nest;
2050 	int err;
2051 
2052 	if (!enc_opts->len)
2053 		return 0;
2054 
2055 	nest = nla_nest_start(skb, enc_opt_type);
2056 	if (!nest)
2057 		goto nla_put_failure;
2058 
2059 	switch (enc_opts->dst_opt_type) {
2060 	case TUNNEL_GENEVE_OPT:
2061 		err = fl_dump_key_geneve_opt(skb, enc_opts);
2062 		if (err)
2063 			goto nla_put_failure;
2064 		break;
2065 	default:
2066 		goto nla_put_failure;
2067 	}
2068 	nla_nest_end(skb, nest);
2069 	return 0;
2070 
2071 nla_put_failure:
2072 	nla_nest_cancel(skb, nest);
2073 	return -EMSGSIZE;
2074 }
2075 
2076 static int fl_dump_key_enc_opt(struct sk_buff *skb,
2077 			       struct flow_dissector_key_enc_opts *key_opts,
2078 			       struct flow_dissector_key_enc_opts *msk_opts)
2079 {
2080 	int err;
2081 
2082 	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
2083 	if (err)
2084 		return err;
2085 
2086 	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
2087 }
2088 
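/* Dump a key/mask pair as flower netlink attributes. L3/L4 fields are
 * only emitted when the protocol fields of the key select them, mirroring
 * how fl_set_key() parses them.
 */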
2089 static int fl_dump_key(struct sk_buff *skb, struct net *net,
2090 		       struct fl_flow_key *key, struct fl_flow_key *mask)
2091 {
2092 	if (mask->indev_ifindex) {
2093 		struct net_device *dev;
2094 
2095 		dev = __dev_get_by_index(net, key->indev_ifindex);
2096 		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
2097 			goto nla_put_failure;
2098 	}
2099 
2100 	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
2101 			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
2102 			    sizeof(key->eth.dst)) ||
2103 	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
2104 			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
2105 			    sizeof(key->eth.src)) ||
2106 	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
2107 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
2108 			    sizeof(key->basic.n_proto)))
2109 		goto nla_put_failure;
2110 
2111 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
2112 		goto nla_put_failure;
2113 
2114 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
2115 			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
2116 		goto nla_put_failure;
2117 
2118 	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
2119 			     TCA_FLOWER_KEY_CVLAN_PRIO,
2120 			     &key->cvlan, &mask->cvlan) ||
2121 	    (mask->cvlan.vlan_tpid &&
2122 	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2123 			  key->cvlan.vlan_tpid)))
2124 		goto nla_put_failure;
2125 
2126 	if (mask->basic.n_proto) {
2127 		if (mask->cvlan.vlan_tpid) {
2128 			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
2129 					 key->basic.n_proto))
2130 				goto nla_put_failure;
2131 		} else if (mask->vlan.vlan_tpid) {
2132 			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
2133 					 key->basic.n_proto))
2134 				goto nla_put_failure;
2135 		}
2136 	}
2137 
2138 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
2139 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
2140 	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
2141 			    &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
2142 			    sizeof(key->basic.ip_proto)) ||
2143 	    fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
2144 		goto nla_put_failure;
2145 
2146 	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2147 	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
2148 			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
2149 			     sizeof(key->ipv4.src)) ||
2150 	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
2151 			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
2152 			     sizeof(key->ipv4.dst))))
2153 		goto nla_put_failure;
2154 	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2155 		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
2156 				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
2157 				  sizeof(key->ipv6.src)) ||
2158 		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
2159 				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
2160 				  sizeof(key->ipv6.dst))))
2161 		goto nla_put_failure;
2162 
2163 	if (key->basic.ip_proto == IPPROTO_TCP &&
2164 	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
2165 			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
2166 			     sizeof(key->tp.src)) ||
2167 	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
2168 			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
2169 			     sizeof(key->tp.dst)) ||
2170 	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
2171 			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
2172 			     sizeof(key->tcp.flags))))
2173 		goto nla_put_failure;
2174 	else if (key->basic.ip_proto == IPPROTO_UDP &&
2175 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
2176 				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
2177 				  sizeof(key->tp.src)) ||
2178 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
2179 				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
2180 				  sizeof(key->tp.dst))))
2181 		goto nla_put_failure;
2182 	else if (key->basic.ip_proto == IPPROTO_SCTP &&
2183 		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
2184 				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
2185 				  sizeof(key->tp.src)) ||
2186 		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
2187 				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
2188 				  sizeof(key->tp.dst))))
2189 		goto nla_put_failure;
2190 	else if (key->basic.n_proto == htons(ETH_P_IP) &&
2191 		 key->basic.ip_proto == IPPROTO_ICMP &&
2192 		 (fl_dump_key_val(skb, &key->icmp.type,
2193 				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
2194 				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
2195 				  sizeof(key->icmp.type)) ||
2196 		  fl_dump_key_val(skb, &key->icmp.code,
2197 				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
2198 				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
2199 				  sizeof(key->icmp.code))))
2200 		goto nla_put_failure;
2201 	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
2202 		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
2203 		 (fl_dump_key_val(skb, &key->icmp.type,
2204 				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
2205 				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
2206 				  sizeof(key->icmp.type)) ||
2207 		  fl_dump_key_val(skb, &key->icmp.code,
2208 				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
2209 				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
2210 				  sizeof(key->icmp.code))))
2211 		goto nla_put_failure;
2212 	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
2213 		  key->basic.n_proto == htons(ETH_P_RARP)) &&
2214 		 (fl_dump_key_val(skb, &key->arp.sip,
2215 				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
2216 				  TCA_FLOWER_KEY_ARP_SIP_MASK,
2217 				  sizeof(key->arp.sip)) ||
2218 		  fl_dump_key_val(skb, &key->arp.tip,
2219 				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
2220 				  TCA_FLOWER_KEY_ARP_TIP_MASK,
2221 				  sizeof(key->arp.tip)) ||
2222 		  fl_dump_key_val(skb, &key->arp.op,
2223 				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
2224 				  TCA_FLOWER_KEY_ARP_OP_MASK,
2225 				  sizeof(key->arp.op)) ||
2226 		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
2227 				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
2228 				  sizeof(key->arp.sha)) ||
2229 		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
2230 				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
2231 				  sizeof(key->arp.tha))))
2232 		goto nla_put_failure;
2233 
2234 	if ((key->basic.ip_proto == IPPROTO_TCP ||
2235 	     key->basic.ip_proto == IPPROTO_UDP ||
2236 	     key->basic.ip_proto == IPPROTO_SCTP) &&
2237 	     fl_dump_key_port_range(skb, key, mask))
2238 		goto nla_put_failure;
2239 
2240 	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
2241 	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
2242 			    TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
2243 			    TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
2244 			    sizeof(key->enc_ipv4.src)) ||
2245 	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
2246 			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
2247 			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
2248 			     sizeof(key->enc_ipv4.dst))))
2249 		goto nla_put_failure;
2250 	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
2251 		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
2252 			    TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
2253 			    TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
2254 			    sizeof(key->enc_ipv6.src)) ||
2255 		 fl_dump_key_val(skb, &key->enc_ipv6.dst,
2256 				 TCA_FLOWER_KEY_ENC_IPV6_DST,
2257 				 &mask->enc_ipv6.dst,
2258 				 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
2259 			    sizeof(key->enc_ipv6.dst))))
2260 		goto nla_put_failure;
2261 
2262 	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
2263 			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
2264 			    sizeof(key->enc_key_id)) ||
2265 	    fl_dump_key_val(skb, &key->enc_tp.src,
2266 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
2267 			    &mask->enc_tp.src,
2268 			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
2269 			    sizeof(key->enc_tp.src)) ||
2270 	    fl_dump_key_val(skb, &key->enc_tp.dst,
2271 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
2272 			    &mask->enc_tp.dst,
2273 			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
2274 			    sizeof(key->enc_tp.dst)) ||
2275 	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
2276 	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
2277 		goto nla_put_failure;
2278 
2279 	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
2280 		goto nla_put_failure;
2281 
2282 	return 0;
2283 
2284 nla_put_failure:
2285 	return -EMSGSIZE;
2286 }
2287 
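/* Dump one filter. Fields that may change concurrently (classid, key,
 * flags) are read under tp->lock; hardware stats are refreshed afterwards
 * since driver callbacks must not run under the spinlock.
 */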
2288 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
2289 		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
2290 {
2291 	struct cls_fl_filter *f = fh;
2292 	struct nlattr *nest;
2293 	struct fl_flow_key *key, *mask;
2294 	bool skip_hw;
2295 
2296 	if (!f)
2297 		return skb->len;
2298 
2299 	t->tcm_handle = f->handle;
2300 
2301 	nest = nla_nest_start(skb, TCA_OPTIONS);
2302 	if (!nest)
2303 		goto nla_put_failure;
2304 
2305 	spin_lock(&tp->lock);
2306 
2307 	if (f->res.classid &&
2308 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
2309 		goto nla_put_failure_locked;
2310 
2311 	key = &f->key;
2312 	mask = &f->mask->key;
2313 	skip_hw = tc_skip_hw(f->flags);
2314 
2315 	if (fl_dump_key(skb, net, key, mask))
2316 		goto nla_put_failure_locked;
2317 
2318 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
2319 		goto nla_put_failure_locked;
2320 
2321 	spin_unlock(&tp->lock);
2322 
2323 	if (!skip_hw)
2324 		fl_hw_update_stats(tp, f, rtnl_held);
2325 
2326 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
2327 		goto nla_put_failure;
2328 
2329 	if (tcf_exts_dump(skb, &f->exts))
2330 		goto nla_put_failure;
2331 
2332 	nla_nest_end(skb, nest);
2333 
2334 	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
2335 		goto nla_put_failure;
2336 
2337 	return skb->len;
2338 
2339 nla_put_failure_locked:
2340 	spin_unlock(&tp->lock);
2341 nla_put_failure:
2342 	nla_nest_cancel(skb, nest);
2343 	return -1;
2344 }
2345 
2346 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
2347 {
2348 	struct fl_flow_tmplt *tmplt = tmplt_priv;
2349 	struct fl_flow_key *key, *mask;
2350 	struct nlattr *nest;
2351 
2352 	nest = nla_nest_start(skb, TCA_OPTIONS);
2353 	if (!nest)
2354 		goto nla_put_failure;
2355 
2356 	key = &tmplt->dummy_key;
2357 	mask = &tmplt->mask;
2358 
2359 	if (fl_dump_key(skb, net, key, mask))
2360 		goto nla_put_failure;
2361 
2362 	nla_nest_end(skb, nest);
2363 
2364 	return skb->len;
2365 
2366 nla_put_failure:
2367 	nla_nest_cancel(skb, nest);
2368 	return -EMSGSIZE;
2369 }
2370 
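/* Update the cached class pointer of a filter whose classid matches the
 * class being (un)bound.
 */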
2371 static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
2372 {
2373 	struct cls_fl_filter *f = fh;
2374 
2375 	if (f && f->res.classid == classid)
2376 		f->res.class = cl;
2377 }
2378 
2379 static struct tcf_proto_ops cls_fl_ops __read_mostly = {
2380 	.kind		= "flower",
2381 	.classify	= fl_classify,
2382 	.init		= fl_init,
2383 	.destroy	= fl_destroy,
2384 	.get		= fl_get,
2385 	.put		= fl_put,
2386 	.change		= fl_change,
2387 	.delete		= fl_delete,
2388 	.walk		= fl_walk,
2389 	.reoffload	= fl_reoffload,
2390 	.dump		= fl_dump,
2391 	.bind_class	= fl_bind_class,
2392 	.tmplt_create	= fl_tmplt_create,
2393 	.tmplt_destroy	= fl_tmplt_destroy,
2394 	.tmplt_dump	= fl_tmplt_dump,
2395 	.owner		= THIS_MODULE,
2396 	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
2397 };
2398 
2399 static int __init cls_fl_init(void)
2400 {
2401 	return register_tcf_proto_ops(&cls_fl_ops);
2402 }
2403 
2404 static void __exit cls_fl_exit(void)
2405 {
2406 	unregister_tcf_proto_ops(&cls_fl_ops);
2407 }
2408 
2409 module_init(cls_fl_init);
2410 module_exit(cls_fl_exit);
2411 
2412 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
2413 MODULE_DESCRIPTION("Flower classifier");
2414 MODULE_LICENSE("GPL v2");
2415