// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/bitfield.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>
#include <linux/ppp_defs.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>
#include <net/vxlan.h>
#include <net/erspan.h>
#include <net/gtp.h>
#include <net/tc_wrapper.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

#define TCA_FLOWER_KEY_CT_FLAGS_MAX \
		((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1)
#define TCA_FLOWER_KEY_CT_FLAGS_MASK \
		(TCA_FLOWER_KEY_CT_FLAGS_MAX - 1)
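
/* Illustrative arithmetic for the two macros above (enum values assumed,
 * not quoted from the UAPI header): if the highest conntrack flag were
 * 1 << 5, the sentinel __TCA_FLOWER_KEY_CT_FLAGS_MAX would be (1 << 5) + 1,
 * so TCA_FLOWER_KEY_CT_FLAGS_MAX = ((1 << 5) + 1 - 1) << 1 = 1 << 6, and
 * TCA_FLOWER_KEY_CT_FLAGS_MASK = (1 << 6) - 1 = 0x3f, i.e. every defined
 * flag bit set.
 */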

struct fl_flow_key {
        struct flow_dissector_key_meta meta;
        struct flow_dissector_key_control control;
        struct flow_dissector_key_control enc_control;
        struct flow_dissector_key_basic basic;
        struct flow_dissector_key_eth_addrs eth;
        struct flow_dissector_key_vlan vlan;
        struct flow_dissector_key_vlan cvlan;
        union {
                struct flow_dissector_key_ipv4_addrs ipv4;
                struct flow_dissector_key_ipv6_addrs ipv6;
        };
        struct flow_dissector_key_ports tp;
        struct flow_dissector_key_icmp icmp;
        struct flow_dissector_key_arp arp;
        struct flow_dissector_key_keyid enc_key_id;
        union {
                struct flow_dissector_key_ipv4_addrs enc_ipv4;
                struct flow_dissector_key_ipv6_addrs enc_ipv6;
        };
        struct flow_dissector_key_ports enc_tp;
        struct flow_dissector_key_mpls mpls;
        struct flow_dissector_key_tcp tcp;
        struct flow_dissector_key_ip ip;
        struct flow_dissector_key_ip enc_ip;
        struct flow_dissector_key_enc_opts enc_opts;
        struct flow_dissector_key_ports_range tp_range;
        struct flow_dissector_key_ct ct;
        struct flow_dissector_key_hash hash;
        struct flow_dissector_key_num_of_vlans num_of_vlans;
        struct flow_dissector_key_pppoe pppoe;
        struct flow_dissector_key_l2tpv3 l2tpv3;
        struct flow_dissector_key_ipsec ipsec;
        struct flow_dissector_key_cfm cfm;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
        unsigned short int start;
        unsigned short int end;
};

struct fl_flow_mask {
        struct fl_flow_key key;
        struct fl_flow_mask_range range;
        u32 flags;
        struct rhash_head ht_node;
        struct rhashtable ht;
        struct rhashtable_params filter_ht_params;
        struct flow_dissector dissector;
        struct list_head filters;
        struct rcu_work rwork;
        struct list_head list;
        refcount_t refcnt;
};

struct fl_flow_tmplt {
        struct fl_flow_key dummy_key;
        struct fl_flow_key mask;
        struct flow_dissector dissector;
        struct tcf_chain *chain;
};

struct cls_fl_head {
        struct rhashtable ht;
        spinlock_t masks_lock; /* Protect masks list */
        struct list_head masks;
        struct list_head hw_filters;
        struct rcu_work rwork;
        struct idr handle_idr;
};

struct cls_fl_filter {
        struct fl_flow_mask *mask;
        struct rhash_head ht_node;
        struct fl_flow_key mkey;
        struct tcf_exts exts;
        struct tcf_result res;
        struct fl_flow_key key;
        struct list_head list;
        struct list_head hw_list;
        u32 handle;
        u32 flags;
        u32 in_hw_count;
        u8 needs_tc_skb_ext:1;
        struct rcu_work rwork;
        struct net_device *hw_dev;
        /* Flower classifier is unlocked, which means that its reference
         * counter can be changed concurrently without any kind of external
         * synchronization. Use atomic reference counter to be
         * concurrency-safe.
         */
        refcount_t refcnt;
        bool deleted;
};

static const struct rhashtable_params mask_ht_params = {
        .key_offset = offsetof(struct fl_flow_mask, key),
        .key_len = sizeof(struct fl_flow_key),
        .head_offset = offsetof(struct fl_flow_mask, ht_node),
        .automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
        return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
        const u8 *bytes = (const u8 *) &mask->key;
        size_t size = sizeof(mask->key);
        size_t i, first = 0, last;

        for (i = 0; i < size; i++) {
                if (bytes[i]) {
                        first = i;
                        break;
                }
        }
        last = first;
        for (i = size - 1; i != first; i--) {
                if (bytes[i]) {
                        last = i;
                        break;
                }
        }
        mask->range.start = rounddown(first, sizeof(long));
        mask->range.end = roundup(last + 1, sizeof(long));
}
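
/* Worked example for fl_mask_update_range() (offsets hypothetical): if the
 * only non-zero mask bytes sit at offsets 40..43 of struct fl_flow_key,
 * the scans above find first = 40 and last = 43, so on a 64-bit host
 * start = rounddown(40, 8) = 40 and end = roundup(44, 8) = 48. Later
 * masked compares then touch a single 8-byte word instead of the full key.
 */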

static void *fl_key_get_start(struct fl_flow_key *key,
                              const struct fl_flow_mask *mask)
{
        return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
                              struct fl_flow_mask *mask)
{
        const long *lkey = fl_key_get_start(key, mask);
        const long *lmask = fl_key_get_start(&mask->key, mask);
        long *lmkey = fl_key_get_start(mkey, mask);
        int i;

        for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
                *lmkey++ = *lkey++ & *lmask++;
}
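
/* Minimal sketch of the masking step, assuming a one-long range
 * (byte values hypothetical):
 *
 *      key:  0x0a00000100005000
 *      mask: 0xff000000ffffff00
 *      mkey: 0x0a00000000005000   (key & mask, one long at a time)
 *
 * Only mkey's masked range is hashed, which is why all filters sharing a
 * mask can live in a single rhashtable.
 */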

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
                               struct fl_flow_mask *mask)
{
        const long *lmask = fl_key_get_start(&mask->key, mask);
        const long *ltmplt;
        int i;

        if (!tmplt)
                return true;
        ltmplt = fl_key_get_start(&tmplt->mask, mask);
        for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
                if (~*ltmplt++ & *lmask++)
                        return false;
        }
        return true;
}
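
/* The subset test above, illustrated on a single byte (values
 * hypothetical): ~template & mask is non-zero exactly when the mask
 * matches a bit the template left unmasked. Template 0xf0 with mask 0xff
 * gives ~0xf0 & 0xff = 0x0f, so the mask does not fit; template 0xff with
 * mask 0xf0 gives 0, so it does.
 */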

static void fl_clear_masked_range(struct fl_flow_key *key,
                                  struct fl_flow_mask *mask)
{
        memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
                                  struct fl_flow_key *key,
                                  struct fl_flow_key *mkey)
{
        u16 min_mask, max_mask, min_val, max_val;

        min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst);
        max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst);
        min_val = ntohs(filter->key.tp_range.tp_min.dst);
        max_val = ntohs(filter->key.tp_range.tp_max.dst);

        if (min_mask && max_mask) {
                if (ntohs(key->tp_range.tp.dst) < min_val ||
                    ntohs(key->tp_range.tp.dst) > max_val)
                        return false;

                /* skb does not have min and max values */
                mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst;
                mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst;
        }
        return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
                                  struct fl_flow_key *key,
                                  struct fl_flow_key *mkey)
{
        u16 min_mask, max_mask, min_val, max_val;

        min_mask = ntohs(filter->mask->key.tp_range.tp_min.src);
        max_mask = ntohs(filter->mask->key.tp_range.tp_max.src);
        min_val = ntohs(filter->key.tp_range.tp_min.src);
        max_val = ntohs(filter->key.tp_range.tp_max.src);

        if (min_mask && max_mask) {
                if (ntohs(key->tp_range.tp.src) < min_val ||
                    ntohs(key->tp_range.tp.src) > max_val)
                        return false;

                /* skb does not have min and max values */
                mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src;
                mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src;
        }
        return true;
}
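
/* Range-match semantics, sketched with assumed values: a filter with
 * tp_min.dst = 100 and tp_max.dst = 200 accepts any packet whose
 * destination port lies in [100, 200]. Because the skb carries no min/max
 * fields of its own, the filter's masked min/max are copied into mkey so
 * that the exact rhashtable lookup which follows can still match.
 */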

static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
                                         struct fl_flow_key *mkey)
{
        return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
                                      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
                                             struct fl_flow_key *mkey,
                                             struct fl_flow_key *key)
{
        struct cls_fl_filter *filter, *f;

        list_for_each_entry_rcu(filter, &mask->filters, list) {
                if (!fl_range_port_dst_cmp(filter, key, mkey))
                        continue;

                if (!fl_range_port_src_cmp(filter, key, mkey))
                        continue;

                f = __fl_lookup(mask, mkey);
                if (f)
                        return f;
        }
        return NULL;
}

static noinline_for_stack
struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key)
{
        struct fl_flow_key mkey;

        fl_set_masked_key(&mkey, key, mask);
        if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
                return fl_lookup_range(mask, &mkey, key);

        return __fl_lookup(mask, &mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
        [IP_CT_ESTABLISHED] =           TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                        TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
        [IP_CT_RELATED] =               TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                        TCA_FLOWER_KEY_CT_FLAGS_RELATED,
        [IP_CT_ESTABLISHED_REPLY] =     TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                        TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
                                        TCA_FLOWER_KEY_CT_FLAGS_REPLY,
        [IP_CT_RELATED_REPLY] =         TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                        TCA_FLOWER_KEY_CT_FLAGS_RELATED |
                                        TCA_FLOWER_KEY_CT_FLAGS_REPLY,
        [IP_CT_NEW] =                   TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
                                        TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb,
                                  const struct tcf_proto *tp,
                                  struct tcf_result *res)
{
        struct cls_fl_head *head = rcu_dereference_bh(tp->root);
        bool post_ct = tc_skb_cb(skb)->post_ct;
        u16 zone = tc_skb_cb(skb)->zone;
        struct fl_flow_key skb_key;
        struct fl_flow_mask *mask;
        struct cls_fl_filter *f;

        list_for_each_entry_rcu(mask, &head->masks, list) {
                flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
                fl_clear_masked_range(&skb_key, mask);

                skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
                /* skb_flow_dissect() does not set n_proto in case of an
                 * unknown protocol, so do it here.
                 */
                skb_key.basic.n_proto = skb_protocol(skb, false);
                skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
                                    ARRAY_SIZE(fl_ct_info_to_flower_map),
                                    post_ct, zone);
                skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
                skb_flow_dissect(skb, &mask->dissector, &skb_key,
                                 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

                f = fl_mask_lookup(mask, &skb_key);
                if (f && !tc_skip_sw(f->flags)) {
                        *res = f->res;
                        return tcf_exts_exec(skb, &f->exts, res);
                }
        }
        return -1;
}
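
/* Userspace view of this classify path, sketched with iproute2
 * (illustrative only):
 *
 *      tc qdisc add dev eth0 clsact
 *      tc filter add dev eth0 ingress protocol ip flower \
 *              ip_proto tcp dst_port 80 action drop
 *
 * Each distinct set of masked fields becomes one fl_flow_mask, and
 * fl_classify() walks those masks in list order until a filter matches.
 */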

static int fl_init(struct tcf_proto *tp)
{
        struct cls_fl_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (!head)
                return -ENOBUFS;

        spin_lock_init(&head->masks_lock);
        INIT_LIST_HEAD_RCU(&head->masks);
        INIT_LIST_HEAD(&head->hw_filters);
        rcu_assign_pointer(tp->root, head);
        idr_init(&head->handle_idr);

        return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
        /* temporary masks don't have their filters list and ht initialized */
        if (mask_init_done) {
                WARN_ON(!list_empty(&mask->filters));
                rhashtable_destroy(&mask->ht);
        }
        kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
        struct fl_flow_mask *mask = container_of(to_rcu_work(work),
                                                 struct fl_flow_mask, rwork);

        fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
        struct fl_flow_mask *mask = container_of(to_rcu_work(work),
                                                 struct fl_flow_mask, rwork);

        fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
        if (!refcount_dec_and_test(&mask->refcnt))
                return false;

        rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

        spin_lock(&head->masks_lock);
        list_del_rcu(&mask->list);
        spin_unlock(&head->masks_lock);

        tcf_queue_work(&mask->rwork, fl_mask_free_work);

        return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
        /* Flower classifier only changes root pointer during init and
         * destroy. Users must obtain reference to tcf_proto instance before
         * calling its API, so tp->root pointer is protected from concurrent
         * call to fl_destroy() by reference counting.
         */
        return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
        if (f->needs_tc_skb_ext)
                tc_skb_ext_tc_disable();
        tcf_exts_destroy(&f->exts);
        tcf_exts_put_net(&f->exts);
        kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
        struct cls_fl_filter *f = container_of(to_rcu_work(work),
                                               struct cls_fl_filter, rwork);

        __fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
                                 bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};

        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
        cls_flower.command = FLOW_CLS_DESTROY;
        cls_flower.cookie = (unsigned long) f;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false,
                            &f->flags, &f->in_hw_count, rtnl_held);
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
                                struct cls_fl_filter *f, bool rtnl_held,
                                struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};
        bool skip_sw = tc_skip_sw(f->flags);
        int err = 0;

        cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
        if (!cls_flower.rule)
                return -ENOMEM;

        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
        cls_flower.command = FLOW_CLS_REPLACE;
        cls_flower.cookie = (unsigned long) f;
        cls_flower.rule->match.dissector = &f->mask->dissector;
        cls_flower.rule->match.mask = &f->mask->key;
        cls_flower.rule->match.key = &f->mkey;
        cls_flower.classid = f->res.classid;

        err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
                                      cls_flower.common.extack);
        if (err) {
                kfree(cls_flower.rule);

                return skip_sw ? err : 0;
        }

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
                              skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
        tc_cleanup_offload_action(&cls_flower.rule->action);
        kfree(cls_flower.rule);

        if (err) {
                fl_hw_destroy_filter(tp, f, rtnl_held, NULL);
                return err;
        }

        if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}
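
/* Offload flag semantics, summarized from the error handling above: with
 * skip_sw the rule must end up in hardware, so a replace that finishes
 * without TCA_CLS_FLAGS_IN_HW set fails with -EINVAL; without skip_sw a
 * hardware failure is tolerated and fl_classify() still matches the flow
 * in software.
 */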

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
                               bool rtnl_held)
{
        struct tcf_block *block = tp->chain->block;
        struct flow_cls_offload cls_flower = {};

        tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
        cls_flower.command = FLOW_CLS_STATS;
        cls_flower.cookie = (unsigned long) f;
        cls_flower.classid = f->res.classid;

        tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
                         rtnl_held);

        tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats);
}

static void __fl_put(struct cls_fl_filter *f)
{
        if (!refcount_dec_and_test(&f->refcnt))
                return;

        if (tcf_exts_get_net(&f->exts))
                tcf_queue_work(&f->rwork, fl_destroy_filter_work);
        else
                __fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
        struct cls_fl_filter *f;

        rcu_read_lock();
        f = idr_find(&head->handle_idr, handle);
        if (f && !refcount_inc_not_zero(&f->refcnt))
                f = NULL;
        rcu_read_unlock();

        return f;
}

static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle)
{
        struct cls_fl_head *head = rcu_dereference_bh(tp->root);
        struct cls_fl_filter *f;

        f = idr_find(&head->handle_idr, handle);
        return f ? &f->exts : NULL;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
                       bool *last, bool rtnl_held,
                       struct netlink_ext_ack *extack)
{
        struct cls_fl_head *head = fl_head_dereference(tp);

        *last = false;

        spin_lock(&tp->lock);
        if (f->deleted) {
                spin_unlock(&tp->lock);
                return -ENOENT;
        }

        f->deleted = true;
        rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
                               f->mask->filter_ht_params);
        idr_remove(&head->handle_idr, f->handle);
        list_del_rcu(&f->list);
        spin_unlock(&tp->lock);

        *last = fl_mask_put(head, f->mask);
        if (!tc_skip_hw(f->flags))
                fl_hw_destroy_filter(tp, f, rtnl_held, extack);
        tcf_unbind_filter(tp, &f->res);
        __fl_put(f);

        return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
        struct cls_fl_head *head = container_of(to_rcu_work(work),
                                                struct cls_fl_head,
                                                rwork);

        rhashtable_destroy(&head->ht);
        kfree(head);
        module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
                       struct netlink_ext_ack *extack)
{
        struct cls_fl_head *head = fl_head_dereference(tp);
        struct fl_flow_mask *mask, *next_mask;
        struct cls_fl_filter *f, *next;
        bool last;

        list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
                list_for_each_entry_safe(f, next, &mask->filters, list) {
                        __fl_delete(tp, f, &last, rtnl_held, extack);
                        if (last)
                                break;
                }
        }
        idr_destroy(&head->handle_idr);

        __module_get(THIS_MODULE);
        tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
        struct cls_fl_filter *f = arg;

        __fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_fl_head *head = fl_head_dereference(tp);

        return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_UNSPEC] = { .strict_start_type =
                                        TCA_FLOWER_L2_MISS },
        [TCA_FLOWER_CLASSID] = { .type = NLA_U32 },
        [TCA_FLOWER_INDEV] = { .type = NLA_STRING,
                               .len = IFNAMSIZ },
        [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
        [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
        [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_CT_STATE] =
                NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
        [TCA_FLOWER_KEY_CT_STATE_MASK] =
                NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK),
        [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY,
                                       .len = 128 / BITS_PER_BYTE },
        [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
                                            .len = 128 / BITS_PER_BYTE },
        [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_SPI] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 },
        [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1),
        [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = {
                .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN },
        [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED },
        [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
        [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
                                                 .len = 128 },
};

static const struct nla_policy
vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 },
};

static const struct nla_policy
erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 },
        [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 },
};

static const struct nla_policy
gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = {
        [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 },
};

static const struct nla_policy
mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = {
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 },
};

static const struct nla_policy
cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = {
        [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8,
                                                       FLOW_DIS_CFM_MDL_MAX),
        [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 },
};

static void fl_set_key_val(struct nlattr **tb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
{
        if (!tb[val_type])
                return;
        nla_memcpy(val, tb[val_type], len);
        if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
                memset(mask, 0xff, len);
        else
                nla_memcpy(mask, tb[mask_type], len);
}
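
/* Defaulting rule, by example (attribute values hypothetical): a request
 * carrying TCA_FLOWER_KEY_IP_PROTO but no mask attribute yields mask
 * bytes of 0xff, i.e. an exact match on the value, while supplying
 * TCA_FLOWER_KEY_IPV4_DST_MASK = 0xffffff00 instead matches a /24 prefix.
 */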

static int fl_set_key_spi(struct nlattr **tb, struct fl_flow_key *key,
                          struct fl_flow_key *mask,
                          struct netlink_ext_ack *extack)
{
        if (key->basic.ip_proto != IPPROTO_ESP &&
            key->basic.ip_proto != IPPROTO_AH) {
                NL_SET_ERR_MSG(extack,
                               "Protocol must be either ESP or AH");
                return -EINVAL;
        }

        fl_set_key_val(tb, &key->ipsec.spi,
                       TCA_FLOWER_KEY_SPI,
                       &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
                       sizeof(key->ipsec.spi));
        return 0;
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
                                 struct fl_flow_key *mask,
                                 struct netlink_ext_ack *extack)
{
        fl_set_key_val(tb, &key->tp_range.tp_min.dst,
                       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst,
                       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst));
        fl_set_key_val(tb, &key->tp_range.tp_max.dst,
                       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst,
                       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst));
        fl_set_key_val(tb, &key->tp_range.tp_min.src,
                       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src,
                       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src));
        fl_set_key_val(tb, &key->tp_range.tp_max.src,
                       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src,
                       TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src));

        if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) {
                NL_SET_ERR_MSG(extack,
                               "Both min and max destination ports must be specified");
                return -EINVAL;
        }
        if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) {
                NL_SET_ERR_MSG(extack,
                               "Both min and max source ports must be specified");
                return -EINVAL;
        }
        if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst &&
            ntohs(key->tp_range.tp_max.dst) <=
            ntohs(key->tp_range.tp_min.dst)) {
                NL_SET_ERR_MSG_ATTR(extack,
                                    tb[TCA_FLOWER_KEY_PORT_DST_MIN],
                                    "Invalid destination port range (min must be strictly smaller than max)");
                return -EINVAL;
        }
        if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src &&
            ntohs(key->tp_range.tp_max.src) <=
            ntohs(key->tp_range.tp_min.src)) {
                NL_SET_ERR_MSG_ATTR(extack,
                                    tb[TCA_FLOWER_KEY_PORT_SRC_MIN],
                                    "Invalid source port range (min must be strictly smaller than max)");
                return -EINVAL;
        }

        return 0;
}
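
/* Examples of the checks above (port numbers hypothetical): min = 100,
 * max = 100 is rejected because max must be strictly greater than min;
 * supplying only TCA_FLOWER_KEY_PORT_DST_MIN is rejected because its
 * all-ones mask then differs from the still-zero max mask.
 */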

static int fl_set_key_mpls_lse(const struct nlattr *nla_lse,
                               struct flow_dissector_key_mpls *key_val,
                               struct flow_dissector_key_mpls *key_mask,
                               struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1];
        struct flow_dissector_mpls_lse *lse_mask;
        struct flow_dissector_mpls_lse *lse_val;
        u8 lse_index;
        u8 depth;
        int err;

        err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse,
                               mpls_stack_entry_policy, extack);
        if (err < 0)
                return err;

        if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) {
                NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\"");
                return -EINVAL;
        }

        depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]);

        /* LSE depth starts at 1, for consistency with terminology used by
         * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets.
         */
        if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) {
                NL_SET_ERR_MSG_ATTR(extack,
                                    tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH],
                                    "Invalid MPLS depth");
                return -EINVAL;
        }
        lse_index = depth - 1;

        dissector_set_mpls_lse(key_val, lse_index);
        dissector_set_mpls_lse(key_mask, lse_index);

        lse_val = &key_val->ls[lse_index];
        lse_mask = &key_mask->ls[lse_index];

        if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) {
                lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]);
                lse_mask->mpls_ttl = MPLS_TTL_MASK;
        }
        if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) {
                u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]);

                if (bos & ~MPLS_BOS_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS],
                                            "Bottom Of Stack (BOS) must be 0 or 1");
                        return -EINVAL;
                }
                lse_val->mpls_bos = bos;
                lse_mask->mpls_bos = MPLS_BOS_MASK;
        }
        if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) {
                u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]);

                if (tc & ~MPLS_TC_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC],
                                            "Traffic Class (TC) must be between 0 and 7");
                        return -EINVAL;
                }
                lse_val->mpls_tc = tc;
                lse_mask->mpls_tc = MPLS_TC_MASK;
        }
        if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) {
                u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]);

                if (label & ~MPLS_LABEL_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL],
                                            "Label must be between 0 and 1048575");
                        return -EINVAL;
                }
                lse_val->mpls_label = label;
                lse_mask->mpls_label = MPLS_LABEL_MASK;
        }

        return 0;
}

static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts,
                                struct flow_dissector_key_mpls *key_val,
                                struct flow_dissector_key_mpls *key_mask,
                                struct netlink_ext_ack *extack)
{
        struct nlattr *nla_lse;
        int rem;
        int err;

        if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) {
                NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts,
                                    "NLA_F_NESTED is missing");
                return -EINVAL;
        }

        nla_for_each_nested(nla_lse, nla_mpls_opts, rem) {
                if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) {
                        NL_SET_ERR_MSG_ATTR(extack, nla_lse,
                                            "Invalid MPLS option type");
                        return -EINVAL;
                }

                err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack);
                if (err < 0)
                        return err;
        }
        if (rem) {
                NL_SET_ERR_MSG(extack,
                               "Bytes leftover after parsing MPLS options");
                return -EINVAL;
        }

        return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
                           struct flow_dissector_key_mpls *key_val,
                           struct flow_dissector_key_mpls *key_mask,
                           struct netlink_ext_ack *extack)
{
        struct flow_dissector_mpls_lse *lse_mask;
        struct flow_dissector_mpls_lse *lse_val;

        if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) {
                if (tb[TCA_FLOWER_KEY_MPLS_TTL] ||
                    tb[TCA_FLOWER_KEY_MPLS_BOS] ||
                    tb[TCA_FLOWER_KEY_MPLS_TC] ||
                    tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_OPTS],
                                            "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute");
                        return -EBADMSG;
                }

                return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS],
                                            key_val, key_mask, extack);
        }

        lse_val = &key_val->ls[0];
        lse_mask = &key_mask->ls[0];

        if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
                lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
                lse_mask->mpls_ttl = MPLS_TTL_MASK;
                dissector_set_mpls_lse(key_val, 0);
                dissector_set_mpls_lse(key_mask, 0);
        }
        if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
                u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

                if (bos & ~MPLS_BOS_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_BOS],
                                            "Bottom Of Stack (BOS) must be 0 or 1");
                        return -EINVAL;
                }
                lse_val->mpls_bos = bos;
                lse_mask->mpls_bos = MPLS_BOS_MASK;
                dissector_set_mpls_lse(key_val, 0);
                dissector_set_mpls_lse(key_mask, 0);
        }
        if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
                u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

                if (tc & ~MPLS_TC_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_TC],
                                            "Traffic Class (TC) must be between 0 and 7");
                        return -EINVAL;
                }
                lse_val->mpls_tc = tc;
                lse_mask->mpls_tc = MPLS_TC_MASK;
                dissector_set_mpls_lse(key_val, 0);
                dissector_set_mpls_lse(key_mask, 0);
        }
        if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
                u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

                if (label & ~MPLS_LABEL_MASK) {
                        NL_SET_ERR_MSG_ATTR(extack,
                                            tb[TCA_FLOWER_KEY_MPLS_LABEL],
                                            "Label must be between 0 and 1048575");
                        return -EINVAL;
                }
                lse_val->mpls_label = label;
                lse_mask->mpls_label = MPLS_LABEL_MASK;
                dissector_set_mpls_lse(key_val, 0);
                dissector_set_mpls_lse(key_mask, 0);
        }
        return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
                            __be16 ethertype,
                            int vlan_id_key, int vlan_prio_key,
                            int vlan_next_eth_type_key,
                            struct flow_dissector_key_vlan *key_val,
                            struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

        if (tb[vlan_id_key]) {
                key_val->vlan_id =
                        nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
                key_mask->vlan_id = VLAN_VID_MASK;
        }
        if (tb[vlan_prio_key]) {
                key_val->vlan_priority =
                        nla_get_u8(tb[vlan_prio_key]) &
                        VLAN_PRIORITY_MASK;
                key_mask->vlan_priority = VLAN_PRIORITY_MASK;
        }
        if (ethertype) {
                key_val->vlan_tpid = ethertype;
                key_mask->vlan_tpid = cpu_to_be16(~0);
        }
        if (tb[vlan_next_eth_type_key]) {
                key_val->vlan_eth_type =
                        nla_get_be16(tb[vlan_next_eth_type_key]);
                key_mask->vlan_eth_type = cpu_to_be16(~0);
        }
}

static void fl_set_key_pppoe(struct nlattr **tb,
                             struct flow_dissector_key_pppoe *key_val,
                             struct flow_dissector_key_pppoe *key_mask,
                             struct fl_flow_key *key,
                             struct fl_flow_key *mask)
{
        /* key_val::type must be set to ETH_P_PPP_SES because ETH_P_PPP_SES
         * was stored in basic.n_proto, which might get overwritten by
         * ppp_proto or be set to 0. The role of key_val::type is similar
         * to vlan_key::tpid.
         */
        key_val->type = htons(ETH_P_PPP_SES);
        key_mask->type = cpu_to_be16(~0);

        if (tb[TCA_FLOWER_KEY_PPPOE_SID]) {
                key_val->session_id =
                        nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]);
                key_mask->session_id = cpu_to_be16(~0);
        }
        if (tb[TCA_FLOWER_KEY_PPP_PROTO]) {
                key_val->ppp_proto =
                        nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]);
                key_mask->ppp_proto = cpu_to_be16(~0);

                if (key_val->ppp_proto == htons(PPP_IP)) {
                        key->basic.n_proto = htons(ETH_P_IP);
                        mask->basic.n_proto = cpu_to_be16(~0);
                } else if (key_val->ppp_proto == htons(PPP_IPV6)) {
                        key->basic.n_proto = htons(ETH_P_IPV6);
                        mask->basic.n_proto = cpu_to_be16(~0);
                } else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
                        key->basic.n_proto = htons(ETH_P_MPLS_UC);
                        mask->basic.n_proto = cpu_to_be16(~0);
                } else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
                        key->basic.n_proto = htons(ETH_P_MPLS_MC);
                        mask->basic.n_proto = cpu_to_be16(~0);
                }
        } else {
                key->basic.n_proto = 0;
                mask->basic.n_proto = cpu_to_be16(0);
        }
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
                            u32 *dissector_key, u32 *dissector_mask,
                            u32 flower_flag_bit, u32 dissector_flag_bit)
{
        if (flower_mask & flower_flag_bit) {
                *dissector_mask |= dissector_flag_bit;
                if (flower_key & flower_flag_bit)
                        *dissector_key |= dissector_flag_bit;
        }
}
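
/* Truth table for one flag bit (values hypothetical):
 *
 *      flower_mask   flower_key   dissector_mask   dissector_key
 *      0             x            unchanged        unchanged
 *      1             0            |= bit           unchanged
 *      1             1            |= bit           |= bit
 *
 * i.e. a flag takes part in matching only when its mask bit is set.
 */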

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
                            u32 *flags_mask, struct netlink_ext_ack *extack)
{
        u32 key, mask;

        /* mask is mandatory for flags */
        if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
                NL_SET_ERR_MSG(extack, "Missing flags mask");
                return -EINVAL;
        }

        key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
        mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

        *flags_key = 0;
        *flags_mask = 0;

        fl_set_key_flag(key, mask, flags_key, flags_mask,
                        TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
        fl_set_key_flag(key, mask, flags_key, flags_mask,
                        TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
                        FLOW_DIS_FIRST_FRAG);

        return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
                          struct flow_dissector_key_ip *key,
                          struct flow_dissector_key_ip *mask)
{
        int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
        int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
        int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
        int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

        fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
        fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
                             int depth, int option_len,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
        struct nlattr *class = NULL, *type = NULL, *data = NULL;
        struct geneve_opt *opt;
        int err, data_len = 0;

        if (option_len > sizeof(struct geneve_opt))
                data_len = option_len - sizeof(struct geneve_opt);

        if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
                return -ERANGE;

        opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
        memset(opt, 0xff, option_len);
        opt->length = data_len / 4;
        opt->r1 = 0;
        opt->r2 = 0;
        opt->r3 = 0;

        /* If no mask has been provided we assume an exact match. */
        if (!depth)
                return sizeof(struct geneve_opt) + data_len;

        if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
                NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
                return -EINVAL;
        }

        err = nla_parse_nested_deprecated(tb,
                                          TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
                                          nla, geneve_opt_policy, extack);
        if (err < 0)
                return err;

        /* We are not allowed to omit any of CLASS, TYPE or DATA
         * fields from the key.
         */
        if (!option_len &&
            (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
             !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
             !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
                return -EINVAL;
        }

        /* Omitting any of CLASS, TYPE or DATA fields is allowed
         * for the mask.
         */
        if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
                int new_len = key->enc_opts.len;

                data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
                data_len = nla_len(data);
                if (data_len < 4) {
                        NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
                        return -ERANGE;
                }
                if (data_len % 4) {
                        NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
                        return -ERANGE;
                }

                new_len += sizeof(struct geneve_opt) + data_len;
                BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
                if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
                        NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
                        return -ERANGE;
                }
                opt->length = data_len / 4;
                memcpy(opt->opt_data, nla_data(data), data_len);
        }

        if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
                class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
                opt->opt_class = nla_get_be16(class);
        }

        if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
                type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
                opt->type = nla_get_u8(type);
        }

        return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key,
                            int depth, int option_len,
                            struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1];
        struct vxlan_metadata *md;
        int err;

        md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len];
        memset(md, 0xff, sizeof(*md));

        if (!depth)
                return sizeof(*md);

        if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) {
                NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla,
                               vxlan_opt_policy, extack);
        if (err < 0)
                return err;

        if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp");
                return -EINVAL;
        }

        if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) {
                md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]);
                md->gbp &= VXLAN_GBP_MASK;
        }

        return sizeof(*md);
}

static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
                             int depth, int option_len,
                             struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1];
        struct erspan_metadata *md;
        int err;

        md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
        md->version = 1;

        if (!depth)
                return sizeof(*md);

        if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) {
                NL_SET_ERR_MSG(extack, "Non-erspan option type for mask");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla,
                               erspan_opt_policy, extack);
        if (err < 0)
                return err;

        if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) {
                NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver");
                return -EINVAL;
        }

        if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER])
                md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]);

        if (md->version == 1) {
                if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
                        NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
                        return -EINVAL;
                }
                memset(&md->u.index, 0xff, sizeof(md->u.index));
                if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
                        nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
                        md->u.index = nla_get_be32(nla);
                }
        } else if (md->version == 2) {
                if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] ||
                                    !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) {
                        NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
                        return -EINVAL;
                }
                md->u.md2.dir = 1;
                if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
                        nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
                        md->u.md2.dir = nla_get_u8(nla);
                }
                set_hwid(&md->u.md2, 0xff);
                if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
                        nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
                        set_hwid(&md->u.md2, nla_get_u8(nla));
                }
        } else {
                NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect");
                return -EINVAL;
        }

        return sizeof(*md);
}

static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key,
                          int depth, int option_len,
                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1];
        struct gtp_pdu_session_info *sinfo;
        u8 len = key->enc_opts.len;
        int err;

        sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len];
        memset(sinfo, 0xff, option_len);

        if (!depth)
                return sizeof(*sinfo);

        if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) {
                NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask");
                return -EINVAL;
        }

        err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla,
                               gtp_opt_policy, extack);
        if (err < 0)
                return err;

        if (!option_len &&
            (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] ||
             !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Missing tunnel key gtp option pdu type or qfi");
                return -EINVAL;
        }

        if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE])
                sinfo->pdu_type =
                        nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]);

        if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])
                sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]);

        return sizeof(*sinfo);
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                          struct fl_flow_key *mask,
                          struct netlink_ext_ack *extack)
{
        const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
        int err, option_len, key_depth, msk_depth = 0;

        err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
                                             TCA_FLOWER_KEY_ENC_OPTS_MAX,
                                             enc_opts_policy, extack);
        if (err)
                return err;

        nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

        if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
                err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
                                                     TCA_FLOWER_KEY_ENC_OPTS_MAX,
                                                     enc_opts_policy, extack);
                if (err)
                        return err;

                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                if (!nla_ok(nla_opt_msk, msk_depth)) {
                        NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
                        return -EINVAL;
                }
        }

        nla_for_each_attr(nla_opt_key, nla_enc_key,
                          nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
                switch (nla_type(nla_opt_key)) {
                case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
                        if (key->enc_opts.dst_opt_type &&
                            key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for geneve options");
                                return -EINVAL;
                        }
                        option_len = 0;
                        key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
                        option_len = fl_set_geneve_opt(nla_opt_key, key,
                                                       key_depth, option_len,
                                                       extack);
                        if (option_len < 0)
                                return option_len;

                        key->enc_opts.len += option_len;
                        /* At the same time we need to parse through the mask
                         * in order to verify exact and mask attribute lengths.
                         */
                        mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
                        option_len = fl_set_geneve_opt(nla_opt_msk, mask,
                                                       msk_depth, option_len,
                                                       extack);
                        if (option_len < 0)
                                return option_len;

                        mask->enc_opts.len += option_len;
                        if (key->enc_opts.len != mask->enc_opts.len) {
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
                        if (key->enc_opts.dst_opt_type) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options");
                                return -EINVAL;
                        }
                        option_len = 0;
                        key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
                        option_len = fl_set_vxlan_opt(nla_opt_key, key,
                                                      key_depth, option_len,
                                                      extack);
                        if (option_len < 0)
                                return option_len;

                        key->enc_opts.len += option_len;
                        /* At the same time we need to parse through the mask
                         * in order to verify exact and mask attribute lengths.
                         */
                        mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT;
                        option_len = fl_set_vxlan_opt(nla_opt_msk, mask,
                                                      msk_depth, option_len,
                                                      extack);
                        if (option_len < 0)
                                return option_len;

                        mask->enc_opts.len += option_len;
                        if (key->enc_opts.len != mask->enc_opts.len) {
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
                        if (key->enc_opts.dst_opt_type) {
                                NL_SET_ERR_MSG(extack, "Duplicate type for erspan options");
                                return -EINVAL;
                        }
                        option_len = 0;
                        key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
                        option_len = fl_set_erspan_opt(nla_opt_key, key,
                                                       key_depth, option_len,
                                                       extack);
                        if (option_len < 0)
                                return option_len;

                        key->enc_opts.len += option_len;
                        /* At the same time we need to parse through the mask
                         * in order to verify exact and mask attribute lengths.
                         */
                        mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT;
                        option_len = fl_set_erspan_opt(nla_opt_msk, mask,
                                                       msk_depth, option_len,
                                                       extack);
                        if (option_len < 0)
                                return option_len;

                        mask->enc_opts.len += option_len;
                        if (key->enc_opts.len != mask->enc_opts.len) {
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_GTP:
                        if (key->enc_opts.dst_opt_type) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Duplicate type for gtp options");
                                return -EINVAL;
                        }
                        option_len = 0;
                        key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
                        option_len = fl_set_gtp_opt(nla_opt_key, key,
                                                    key_depth, option_len,
                                                    extack);
                        if (option_len < 0)
                                return option_len;

                        key->enc_opts.len += option_len;
                        /* At the same time we need to parse through the mask
                         * in order to verify exact and mask attribute lengths.
                         */
                        mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT;
                        option_len = fl_set_gtp_opt(nla_opt_msk, mask,
                                                    msk_depth, option_len,
                                                    extack);
                        if (option_len < 0)
                                return option_len;

                        mask->enc_opts.len += option_len;
                        if (key->enc_opts.len != mask->enc_opts.len) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "Key and mask miss aligned");
                                return -EINVAL;
                        }
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
                        return -EINVAL;
                }

                if (!msk_depth)
                        continue;

                if (!nla_ok(nla_opt_msk, msk_depth)) {
                        NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
                        return -EINVAL;
                }
                nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
        }

        return 0;
}
1596
fl_validate_ct_state(u16 state,struct nlattr * tb,struct netlink_ext_ack * extack)1597 static int fl_validate_ct_state(u16 state, struct nlattr *tb,
1598 struct netlink_ext_ack *extack)
1599 {
1600 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) {
1601 NL_SET_ERR_MSG_ATTR(extack, tb,
1602 "no trk, so no other flag can be set");
1603 return -EINVAL;
1604 }
1605
1606 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1607 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) {
1608 NL_SET_ERR_MSG_ATTR(extack, tb,
1609 "new and est are mutually exclusive");
1610 return -EINVAL;
1611 }
1612
1613 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID &&
1614 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1615 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1616 NL_SET_ERR_MSG_ATTR(extack, tb,
1617 "when inv is set, only trk may be set");
1618 return -EINVAL;
1619 }
1620
1621 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW &&
1622 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) {
1623 NL_SET_ERR_MSG_ATTR(extack, tb,
1624 "new and rpl are mutually exclusive");
1625 return -EINVAL;
1626 }
1627
1628 return 0;
1629 }
1630
1631 static int fl_set_key_ct(struct nlattr **tb,
1632 struct flow_dissector_key_ct *key,
1633 struct flow_dissector_key_ct *mask,
1634 struct netlink_ext_ack *extack)
1635 {
1636 if (tb[TCA_FLOWER_KEY_CT_STATE]) {
1637 int err;
1638
1639 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
1640 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
1641 return -EOPNOTSUPP;
1642 }
1643 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
1644 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
1645 sizeof(key->ct_state));
1646
1647 err = fl_validate_ct_state(key->ct_state & mask->ct_state,
1648 tb[TCA_FLOWER_KEY_CT_STATE_MASK],
1649 extack);
1650 if (err)
1651 return err;
1652
1653 }
1654 if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
1655 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
1656 NL_SET_ERR_MSG(extack, "Conntrack zones aren't enabled");
1657 return -EOPNOTSUPP;
1658 }
1659 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
1660 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
1661 sizeof(key->ct_zone));
1662 }
1663 if (tb[TCA_FLOWER_KEY_CT_MARK]) {
1664 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
1665 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
1666 return -EOPNOTSUPP;
1667 }
1668 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
1669 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
1670 sizeof(key->ct_mark));
1671 }
1672 if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
1673 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
1674 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
1675 return -EOPNOTSUPP;
1676 }
1677 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
1678 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
1679 sizeof(key->ct_labels));
1680 }
1681
1682 return 0;
1683 }
1684
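/* Decide whether an ethertype attribute introduces (another) VLAN tag.
 * Returns true if enough VLAN headers were already requested via
 * num_of_vlans (> vthresh) or if the ethertype itself is 802.1Q/802.1AD.
 * Otherwise the ethertype is the final protocol: it is recorded as an
 * exact match on basic.n_proto and false is returned.
 */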
1685 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
1686 struct fl_flow_key *key, struct fl_flow_key *mask,
1687 int vthresh)
1688 {
1689 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
1690
1691 if (!tb) {
1692 *ethertype = 0;
1693 return good_num_of_vlans;
1694 }
1695
1696 *ethertype = nla_get_be16(tb);
1697 if (good_num_of_vlans || eth_type_vlan(*ethertype))
1698 return true;
1699
1700 key->basic.n_proto = *ethertype;
1701 mask->basic.n_proto = cpu_to_be16(~0);
1702 return false;
1703 }
1704
1705 static void fl_set_key_cfm_md_level(struct nlattr **tb,
1706 struct fl_flow_key *key,
1707 struct fl_flow_key *mask,
1708 struct netlink_ext_ack *extack)
1709 {
1710 u8 level;
1711
1712 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL])
1713 return;
1714
1715 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]);
1716 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level);
1717 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK;
1718 }
1719
1720 static void fl_set_key_cfm_opcode(struct nlattr **tb,
1721 struct fl_flow_key *key,
1722 struct fl_flow_key *mask,
1723 struct netlink_ext_ack *extack)
1724 {
1725 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE,
1726 &mask->cfm.opcode, TCA_FLOWER_UNSPEC,
1727 sizeof(key->cfm.opcode));
1728 }
1729
1730 static int fl_set_key_cfm(struct nlattr **tb,
1731 struct fl_flow_key *key,
1732 struct fl_flow_key *mask,
1733 struct netlink_ext_ack *extack)
1734 {
1735 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1];
1736 int err;
1737
1738 if (!tb[TCA_FLOWER_KEY_CFM])
1739 return 0;
1740
1741 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX,
1742 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack);
1743 if (err < 0)
1744 return err;
1745
1746 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack);
1747 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack);
1748
1749 return 0;
1750 }
1751
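/* Central netlink-to-key translator: copy each TCA_FLOWER_* attribute
 * pair into the key and its mask. Fields that carry no attribute keep
 * their zero mask and therefore do not participate in matching; the
 * dissector built later from the mask skips them entirely.
 */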
1752 static int fl_set_key(struct net *net, struct nlattr **tb,
1753 struct fl_flow_key *key, struct fl_flow_key *mask,
1754 struct netlink_ext_ack *extack)
1755 {
1756 __be16 ethertype;
1757 int ret = 0;
1758
1759 if (tb[TCA_FLOWER_INDEV]) {
1760 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
1761 if (err < 0)
1762 return err;
1763 key->meta.ingress_ifindex = err;
1764 mask->meta.ingress_ifindex = 0xffffffff;
1765 }
1766
1767 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS,
1768 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC,
1769 sizeof(key->meta.l2_miss));
1770
1771 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
1772 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
1773 sizeof(key->eth.dst));
1774 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
1775 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
1776 sizeof(key->eth.src));
1777 fl_set_key_val(tb, &key->num_of_vlans,
1778 TCA_FLOWER_KEY_NUM_OF_VLANS,
1779 &mask->num_of_vlans,
1780 TCA_FLOWER_UNSPEC,
1781 sizeof(key->num_of_vlans));
1782
1783 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
1784 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
1785 TCA_FLOWER_KEY_VLAN_PRIO,
1786 TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1787 &key->vlan, &mask->vlan);
1788
1789 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
1790 &ethertype, key, mask, 1)) {
1791 fl_set_key_vlan(tb, ethertype,
1792 TCA_FLOWER_KEY_CVLAN_ID,
1793 TCA_FLOWER_KEY_CVLAN_PRIO,
1794 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1795 &key->cvlan, &mask->cvlan);
1796 fl_set_key_val(tb, &key->basic.n_proto,
1797 TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
1798 &mask->basic.n_proto,
1799 TCA_FLOWER_UNSPEC,
1800 sizeof(key->basic.n_proto));
1801 }
1802 }
1803
1804 if (key->basic.n_proto == htons(ETH_P_PPP_SES))
1805 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask);
1806
1807 if (key->basic.n_proto == htons(ETH_P_IP) ||
1808 key->basic.n_proto == htons(ETH_P_IPV6)) {
1809 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
1810 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
1811 sizeof(key->basic.ip_proto));
1812 fl_set_key_ip(tb, false, &key->ip, &mask->ip);
1813 }
1814
1815 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
1816 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1817 mask->control.addr_type = ~0;
1818 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
1819 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
1820 sizeof(key->ipv4.src));
1821 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
1822 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
1823 sizeof(key->ipv4.dst));
1824 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
1825 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1826 mask->control.addr_type = ~0;
1827 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
1828 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
1829 sizeof(key->ipv6.src));
1830 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
1831 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
1832 sizeof(key->ipv6.dst));
1833 }
1834
1835 if (key->basic.ip_proto == IPPROTO_TCP) {
1836 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
1837 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
1838 sizeof(key->tp.src));
1839 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
1840 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
1841 sizeof(key->tp.dst));
1842 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
1843 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
1844 sizeof(key->tcp.flags));
1845 } else if (key->basic.ip_proto == IPPROTO_UDP) {
1846 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
1847 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
1848 sizeof(key->tp.src));
1849 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
1850 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
1851 sizeof(key->tp.dst));
1852 } else if (key->basic.ip_proto == IPPROTO_SCTP) {
1853 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
1854 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
1855 sizeof(key->tp.src));
1856 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
1857 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
1858 sizeof(key->tp.dst));
1859 } else if (key->basic.n_proto == htons(ETH_P_IP) &&
1860 key->basic.ip_proto == IPPROTO_ICMP) {
1861 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
1862 &mask->icmp.type,
1863 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
1864 sizeof(key->icmp.type));
1865 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
1866 &mask->icmp.code,
1867 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
1868 sizeof(key->icmp.code));
1869 } else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
1870 key->basic.ip_proto == IPPROTO_ICMPV6) {
1871 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
1872 &mask->icmp.type,
1873 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
1874 sizeof(key->icmp.type));
1875 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
1876 &mask->icmp.code,
1877 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
1878 sizeof(key->icmp.code));
1879 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
1880 key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
1881 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack);
1882 if (ret)
1883 return ret;
1884 } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
1885 key->basic.n_proto == htons(ETH_P_RARP)) {
1886 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
1887 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
1888 sizeof(key->arp.sip));
1889 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
1890 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
1891 sizeof(key->arp.tip));
1892 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
1893 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
1894 sizeof(key->arp.op));
1895 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
1896 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
1897 sizeof(key->arp.sha));
1898 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
1899 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
1900 sizeof(key->arp.tha));
1901 } else if (key->basic.ip_proto == IPPROTO_L2TP) {
1902 fl_set_key_val(tb, &key->l2tpv3.session_id,
1903 TCA_FLOWER_KEY_L2TPV3_SID,
1904 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC,
1905 sizeof(key->l2tpv3.session_id));
1906 } else if (key->basic.n_proto == htons(ETH_P_CFM)) {
1907 ret = fl_set_key_cfm(tb, key, mask, extack);
1908 if (ret)
1909 return ret;
1910 }
1911
1912 if (key->basic.ip_proto == IPPROTO_TCP ||
1913 key->basic.ip_proto == IPPROTO_UDP ||
1914 key->basic.ip_proto == IPPROTO_SCTP) {
1915 ret = fl_set_key_port_range(tb, key, mask, extack);
1916 if (ret)
1917 return ret;
1918 }
1919
1920 if (tb[TCA_FLOWER_KEY_SPI]) {
1921 ret = fl_set_key_spi(tb, key, mask, extack);
1922 if (ret)
1923 return ret;
1924 }
1925
1926 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
1927 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
1928 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1929 mask->enc_control.addr_type = ~0;
1930 fl_set_key_val(tb, &key->enc_ipv4.src,
1931 TCA_FLOWER_KEY_ENC_IPV4_SRC,
1932 &mask->enc_ipv4.src,
1933 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
1934 sizeof(key->enc_ipv4.src));
1935 fl_set_key_val(tb, &key->enc_ipv4.dst,
1936 TCA_FLOWER_KEY_ENC_IPV4_DST,
1937 &mask->enc_ipv4.dst,
1938 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
1939 sizeof(key->enc_ipv4.dst));
1940 }
1941
1942 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
1943 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
1944 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1945 mask->enc_control.addr_type = ~0;
1946 fl_set_key_val(tb, &key->enc_ipv6.src,
1947 TCA_FLOWER_KEY_ENC_IPV6_SRC,
1948 &mask->enc_ipv6.src,
1949 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
1950 sizeof(key->enc_ipv6.src));
1951 fl_set_key_val(tb, &key->enc_ipv6.dst,
1952 TCA_FLOWER_KEY_ENC_IPV6_DST,
1953 &mask->enc_ipv6.dst,
1954 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
1955 sizeof(key->enc_ipv6.dst));
1956 }
1957
1958 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
1959 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
1960 sizeof(key->enc_key_id.keyid));
1961
1962 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
1963 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
1964 sizeof(key->enc_tp.src));
1965
1966 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
1967 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
1968 sizeof(key->enc_tp.dst));
1969
1970 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
1971
1972 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
1973 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
1974 sizeof(key->hash.hash));
1975
1976 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
1977 ret = fl_set_enc_opt(tb, key, mask, extack);
1978 if (ret)
1979 return ret;
1980 }
1981
1982 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
1983 if (ret)
1984 return ret;
1985
1986 if (tb[TCA_FLOWER_KEY_FLAGS])
1987 ret = fl_set_key_flags(tb, &key->control.flags,
1988 &mask->control.flags, extack);
1989
1990 return ret;
1991 }
1992
1993 static void fl_mask_copy(struct fl_flow_mask *dst,
1994 struct fl_flow_mask *src)
1995 {
1996 const void *psrc = fl_key_get_start(&src->key, src);
1997 void *pdst = fl_key_get_start(&dst->key, src);
1998
1999 memcpy(pdst, psrc, fl_mask_range(src));
2000 dst->range = src->range;
2001 }
2002
2003 static const struct rhashtable_params fl_ht_params = {
2004 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
2005 .head_offset = offsetof(struct cls_fl_filter, ht_node),
2006 .automatic_shrinking = true,
2007 };
2008
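/* Each mask owns a filter hashtable keyed on the masked flow key. Only
 * the byte range actually covered by the mask ([range.start, range.end))
 * is hashed, so key_len and key_offset are derived per mask from the
 * shared fl_ht_params template above.
 */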
2009 static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
2010 {
2011 mask->filter_ht_params = fl_ht_params;
2012 mask->filter_ht_params.key_len = fl_mask_range(mask);
2013 mask->filter_ht_params.key_offset += mask->range.start;
2014
2015 return rhashtable_init(&mask->ht, &mask->filter_ht_params);
2016 }
2017
2018 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
2019 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member)
2020
2021 #define FL_KEY_IS_MASKED(mask, member) \
2022 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
2023 0, FL_KEY_MEMBER_SIZE(member))
2024
2025 #define FL_KEY_SET(keys, cnt, id, member) \
2026 do { \
2027 keys[cnt].key_id = id; \
2028 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \
2029 cnt++; \
2030 } while (0)
2031
2032 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \
2033 do { \
2034 if (FL_KEY_IS_MASKED(mask, member)) \
2035 FL_KEY_SET(keys, cnt, id, member); \
2036 } while (0)
2037
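/* Build the flow_dissector used for this mask by both the software fast
 * path and hardware offload. CONTROL and BASIC are always present;
 * every other key is included only if at least one bit of its mask is
 * set, keeping dissection cost proportional to what the filter matches.
 */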
2038 static void fl_init_dissector(struct flow_dissector *dissector,
2039 struct fl_flow_key *mask)
2040 {
2041 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
2042 size_t cnt = 0;
2043
2044 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2045 FLOW_DISSECTOR_KEY_META, meta);
2046 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
2047 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
2048 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2049 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
2050 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2051 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
2052 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2053 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
2054 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2055 FLOW_DISSECTOR_KEY_PORTS, tp);
2056 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2057 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range);
2058 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2059 FLOW_DISSECTOR_KEY_IP, ip);
2060 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2061 FLOW_DISSECTOR_KEY_TCP, tcp);
2062 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2063 FLOW_DISSECTOR_KEY_ICMP, icmp);
2064 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2065 FLOW_DISSECTOR_KEY_ARP, arp);
2066 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2067 FLOW_DISSECTOR_KEY_MPLS, mpls);
2068 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2069 FLOW_DISSECTOR_KEY_VLAN, vlan);
2070 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2071 FLOW_DISSECTOR_KEY_CVLAN, cvlan);
2072 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2073 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
2074 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2075 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
2076 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2077 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
2078 if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
2079 FL_KEY_IS_MASKED(mask, enc_ipv6))
2080 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
2081 enc_control);
2082 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2083 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
2084 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2085 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
2086 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2087 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
2088 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2089 FLOW_DISSECTOR_KEY_CT, ct);
2090 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2091 FLOW_DISSECTOR_KEY_HASH, hash);
2092 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2093 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
2094 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2095 FLOW_DISSECTOR_KEY_PPPOE, pppoe);
2096 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2097 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3);
2098 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2099 FLOW_DISSECTOR_KEY_IPSEC, ipsec);
2100 FL_KEY_SET_IF_MASKED(mask, keys, cnt,
2101 FLOW_DISSECTOR_KEY_CFM, cfm);
2102
2103 skb_flow_dissector_init(dissector, keys, cnt);
2104 }
2105
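/* Promote the caller's temporary mask into a fully initialized one:
 * copy the used byte range, derive the range flag for min/max port
 * matching, set up the per-mask filter hashtable and dissector, and
 * replace the temporary node in the masks hashtable.
 */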
2106 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
2107 struct fl_flow_mask *mask)
2108 {
2109 struct fl_flow_mask *newmask;
2110 int err;
2111
2112 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
2113 if (!newmask)
2114 return ERR_PTR(-ENOMEM);
2115
2116 fl_mask_copy(newmask, mask);
2117
2118 if ((newmask->key.tp_range.tp_min.dst &&
2119 newmask->key.tp_range.tp_max.dst) ||
2120 (newmask->key.tp_range.tp_min.src &&
2121 newmask->key.tp_range.tp_max.src))
2122 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
2123
2124 err = fl_init_mask_hashtable(newmask);
2125 if (err)
2126 goto errout_free;
2127
2128 fl_init_dissector(&newmask->dissector, &newmask->key);
2129
2130 INIT_LIST_HEAD_RCU(&newmask->filters);
2131
2132 refcount_set(&newmask->refcnt, 1);
2133 err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
2134 &newmask->ht_node, mask_ht_params);
2135 if (err)
2136 goto errout_destroy;
2137
2138 spin_lock(&head->masks_lock);
2139 list_add_tail_rcu(&newmask->list, &head->masks);
2140 spin_unlock(&head->masks_lock);
2141
2142 return newmask;
2143
2144 errout_destroy:
2145 rhashtable_destroy(&newmask->ht);
2146 errout_free:
2147 kfree(newmask);
2148
2149 return ERR_PTR(err);
2150 }
2151
2152 static int fl_check_assign_mask(struct cls_fl_head *head,
2153 struct cls_fl_filter *fnew,
2154 struct cls_fl_filter *fold,
2155 struct fl_flow_mask *mask)
2156 {
2157 struct fl_flow_mask *newmask;
2158 int ret = 0;
2159
2160 rcu_read_lock();
2161
2162 /* Insert the mask as a temporary node to prevent concurrent creation
2163 * of a mask with the same key. Any concurrent lookup with the same key
2164 * will return -EAGAIN because the mask's refcnt is zero.
2165 */
2166 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
2167 &mask->ht_node,
2168 mask_ht_params);
2169 if (!fnew->mask) {
2170 rcu_read_unlock();
2171
2172 if (fold) {
2173 ret = -EINVAL;
2174 goto errout_cleanup;
2175 }
2176
2177 newmask = fl_create_new_mask(head, mask);
2178 if (IS_ERR(newmask)) {
2179 ret = PTR_ERR(newmask);
2180 goto errout_cleanup;
2181 }
2182
2183 fnew->mask = newmask;
2184 return 0;
2185 } else if (IS_ERR(fnew->mask)) {
2186 ret = PTR_ERR(fnew->mask);
2187 } else if (fold && fold->mask != fnew->mask) {
2188 ret = -EINVAL;
2189 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
2190 /* Mask was deleted concurrently, try again */
2191 ret = -EAGAIN;
2192 }
2193 rcu_read_unlock();
2194 return ret;
2195
2196 errout_cleanup:
2197 rhashtable_remove_fast(&head->ht, &mask->ht_node,
2198 mask_ht_params);
2199 return ret;
2200 }
2201
2202 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask)
2203 {
2204 return mask->meta.l2_miss;
2205 }
2206
2207 static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
2208 struct cls_fl_filter *fold,
2209 bool *in_ht)
2210 {
2211 struct fl_flow_mask *mask = fnew->mask;
2212 int err;
2213
2214 err = rhashtable_lookup_insert_fast(&mask->ht,
2215 &fnew->ht_node,
2216 mask->filter_ht_params);
2217 if (err) {
2218 *in_ht = false;
2219 /* It is okay if a filter with the same key exists when
2220 * overwriting.
2221 */
2222 return fold && err == -EEXIST ? 0 : err;
2223 }
2224
2225 *in_ht = true;
2226 return 0;
2227 }
2228
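/* Create a new filter or update an existing one (fold). Designed to run
 * without rtnl where possible: the handle is reserved in the IDR first,
 * key/mask parsing happens outside of any lock, and the final commit is
 * done under tp->lock, returning -EAGAIN whenever a concurrent delete of
 * tp, fold or the mask forces the caller to retry.
 */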
2229 static int fl_change(struct net *net, struct sk_buff *in_skb,
2230 struct tcf_proto *tp, unsigned long base,
2231 u32 handle, struct nlattr **tca,
2232 void **arg, u32 flags,
2233 struct netlink_ext_ack *extack)
2234 {
2235 struct cls_fl_head *head = fl_head_dereference(tp);
2236 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL);
2237 struct cls_fl_filter *fold = *arg;
2238 bool bound_to_filter = false;
2239 struct cls_fl_filter *fnew;
2240 struct fl_flow_mask *mask;
2241 struct nlattr **tb;
2242 bool in_ht;
2243 int err;
2244
2245 if (!tca[TCA_OPTIONS]) {
2246 err = -EINVAL;
2247 goto errout_fold;
2248 }
2249
2250 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
2251 if (!mask) {
2252 err = -ENOBUFS;
2253 goto errout_fold;
2254 }
2255
2256 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2257 if (!tb) {
2258 err = -ENOBUFS;
2259 goto errout_mask_alloc;
2260 }
2261
2262 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2263 tca[TCA_OPTIONS], fl_policy, NULL);
2264 if (err < 0)
2265 goto errout_tb;
2266
2267 if (fold && handle && fold->handle != handle) {
2268 err = -EINVAL;
2269 goto errout_tb;
2270 }
2271
2272 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
2273 if (!fnew) {
2274 err = -ENOBUFS;
2275 goto errout_tb;
2276 }
2277 INIT_LIST_HEAD(&fnew->hw_list);
2278 refcount_set(&fnew->refcnt, 1);
2279
2280 if (tb[TCA_FLOWER_FLAGS]) {
2281 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
2282
2283 if (!tc_flags_valid(fnew->flags)) {
2284 kfree(fnew);
2285 err = -EINVAL;
2286 goto errout_tb;
2287 }
2288 }
2289
2290 if (!fold) {
2291 spin_lock(&tp->lock);
2292 if (!handle) {
2293 handle = 1;
2294 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2295 INT_MAX, GFP_ATOMIC);
2296 } else {
2297 err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
2298 handle, GFP_ATOMIC);
2299
2300 /* A filter with the specified handle was concurrently
2301 * inserted after the initial check in cls_api. This is not
2302 * necessarily an error if NLM_F_EXCL is not set in the
2303 * message flags. Returning -EAGAIN will cause cls_api to
2304 * try to update the concurrently inserted rule.
2305 */
2306 if (err == -ENOSPC)
2307 err = -EAGAIN;
2308 }
2309 spin_unlock(&tp->lock);
2310
2311 if (err) {
2312 kfree(fnew);
2313 goto errout_tb;
2314 }
2315 }
2316 fnew->handle = handle;
2317
2318 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle,
2319 !tc_skip_hw(fnew->flags));
2320 if (err < 0)
2321 goto errout_idr;
2322
2323 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
2324 &fnew->exts, flags, fnew->flags,
2325 extack);
2326 if (err < 0)
2327 goto errout_idr;
2328
2329 if (tb[TCA_FLOWER_CLASSID]) {
2330 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
2331 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2332 rtnl_lock();
2333 tcf_bind_filter(tp, &fnew->res, base);
2334 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2335 rtnl_unlock();
2336 bound_to_filter = true;
2337 }
2338
2339 err = fl_set_key(net, tb, &fnew->key, &mask->key, extack);
2340 if (err)
2341 goto unbind_filter;
2342
2343 fl_mask_update_range(mask);
2344 fl_set_masked_key(&fnew->mkey, &fnew->key, mask);
2345
2346 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) {
2347 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
2348 err = -EINVAL;
2349 goto unbind_filter;
2350 }
2351
2352 /* Enable tc skb extension if filter matches on data extracted from
2353 * this extension.
2354 */
2355 if (fl_needs_tc_skb_ext(&mask->key)) {
2356 fnew->needs_tc_skb_ext = 1;
2357 tc_skb_ext_tc_enable();
2358 }
2359
2360 err = fl_check_assign_mask(head, fnew, fold, mask);
2361 if (err)
2362 goto unbind_filter;
2363
2364 err = fl_ht_insert_unique(fnew, fold, &in_ht);
2365 if (err)
2366 goto errout_mask;
2367
2368 if (!tc_skip_hw(fnew->flags)) {
2369 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
2370 if (err)
2371 goto errout_ht;
2372 }
2373
2374 if (!tc_in_hw(fnew->flags))
2375 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
2376
2377 spin_lock(&tp->lock);
2378
2379 /* tp was deleted concurrently. -EAGAIN will cause the caller to look
2380 * up the proto again or create a new one, if necessary.
2381 */
2382 if (tp->deleting) {
2383 err = -EAGAIN;
2384 goto errout_hw;
2385 }
2386
2387 if (fold) {
2388 /* Fold filter was deleted concurrently. Retry lookup. */
2389 if (fold->deleted) {
2390 err = -EAGAIN;
2391 goto errout_hw;
2392 }
2393
2394 fnew->handle = handle;
2395
2396 if (!in_ht) {
2397 struct rhashtable_params params =
2398 fnew->mask->filter_ht_params;
2399
2400 err = rhashtable_insert_fast(&fnew->mask->ht,
2401 &fnew->ht_node,
2402 params);
2403 if (err)
2404 goto errout_hw;
2405 in_ht = true;
2406 }
2407
2408 refcount_inc(&fnew->refcnt);
2409 rhashtable_remove_fast(&fold->mask->ht,
2410 &fold->ht_node,
2411 fold->mask->filter_ht_params);
2412 idr_replace(&head->handle_idr, fnew, fnew->handle);
2413 list_replace_rcu(&fold->list, &fnew->list);
2414 fold->deleted = true;
2415
2416 spin_unlock(&tp->lock);
2417
2418 fl_mask_put(head, fold->mask);
2419 if (!tc_skip_hw(fold->flags))
2420 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
2421 tcf_unbind_filter(tp, &fold->res);
2422 /* Caller holds reference to fold, so refcnt is always > 0
2423 * after this.
2424 */
2425 refcount_dec(&fold->refcnt);
2426 __fl_put(fold);
2427 } else {
2428 idr_replace(&head->handle_idr, fnew, fnew->handle);
2429
2430 refcount_inc(&fnew->refcnt);
2431 list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
2432 spin_unlock(&tp->lock);
2433 }
2434
2435 *arg = fnew;
2436
2437 kfree(tb);
2438 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2439 return 0;
2440
2441 errout_ht:
2442 spin_lock(&tp->lock);
2443 errout_hw:
2444 fnew->deleted = true;
2445 spin_unlock(&tp->lock);
2446 if (!tc_skip_hw(fnew->flags))
2447 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
2448 if (in_ht)
2449 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
2450 fnew->mask->filter_ht_params);
2451 errout_mask:
2452 fl_mask_put(head, fnew->mask);
2453
2454 unbind_filter:
2455 if (bound_to_filter) {
2456 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2457 rtnl_lock();
2458 tcf_unbind_filter(tp, &fnew->res);
2459 if (flags & TCA_ACT_FLAGS_NO_RTNL)
2460 rtnl_unlock();
2461 }
2462
2463 errout_idr:
2464 if (!fold) {
2465 spin_lock(&tp->lock);
2466 idr_remove(&head->handle_idr, fnew->handle);
2467 spin_unlock(&tp->lock);
2468 }
2469 __fl_put(fnew);
2470 errout_tb:
2471 kfree(tb);
2472 errout_mask_alloc:
2473 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
2474 errout_fold:
2475 if (fold)
2476 __fl_put(fold);
2477 return err;
2478 }
2479
2480 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
2481 bool rtnl_held, struct netlink_ext_ack *extack)
2482 {
2483 struct cls_fl_head *head = fl_head_dereference(tp);
2484 struct cls_fl_filter *f = arg;
2485 bool last_on_mask;
2486 int err = 0;
2487
2488 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
2489 *last = list_empty(&head->masks);
2490 __fl_put(f);
2491
2492 return err;
2493 }
2494
2495 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
2496 bool rtnl_held)
2497 {
2498 struct cls_fl_head *head = fl_head_dereference(tp);
2499 unsigned long id = arg->cookie, tmp;
2500 struct cls_fl_filter *f;
2501
2502 arg->count = arg->skip;
2503
2504 rcu_read_lock();
2505 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
2506 /* don't return filters that are being deleted */
2507 if (!f || !refcount_inc_not_zero(&f->refcnt))
2508 continue;
2509 rcu_read_unlock();
2510
2511 if (arg->fn(tp, f, arg) < 0) {
2512 __fl_put(f);
2513 arg->stop = 1;
2514 rcu_read_lock();
2515 break;
2516 }
2517 __fl_put(f);
2518 arg->count++;
2519 rcu_read_lock();
2520 }
2521 rcu_read_unlock();
2522 arg->cookie = id;
2523 }
2524
2525 static struct cls_fl_filter *
2526 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
2527 {
2528 struct cls_fl_head *head = fl_head_dereference(tp);
2529
2530 spin_lock(&tp->lock);
2531 if (list_empty(&head->hw_filters)) {
2532 spin_unlock(&tp->lock);
2533 return NULL;
2534 }
2535
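/* Seed the cursor with the list head itself so that
 * list_for_each_entry_continue() below starts at the first real entry.
 */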
2536 if (!f)
2537 f = list_entry(&head->hw_filters, struct cls_fl_filter,
2538 hw_list);
2539 list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
2540 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
2541 spin_unlock(&tp->lock);
2542 return f;
2543 }
2544 }
2545
2546 spin_unlock(&tp->lock);
2547 return NULL;
2548 }
2549
2550 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
2551 void *cb_priv, struct netlink_ext_ack *extack)
2552 {
2553 struct tcf_block *block = tp->chain->block;
2554 struct flow_cls_offload cls_flower = {};
2555 struct cls_fl_filter *f = NULL;
2556 int err;
2557
2558 /* hw_filters list can only be changed by hw offload functions after
2559 * obtaining rtnl lock. Make sure it is not changed while reoffload is
2560 * iterating it.
2561 */
2562 ASSERT_RTNL();
2563
2564 while ((f = fl_get_next_hw_filter(tp, f, add))) {
2565 cls_flower.rule =
2566 flow_rule_alloc(tcf_exts_num_actions(&f->exts));
2567 if (!cls_flower.rule) {
2568 __fl_put(f);
2569 return -ENOMEM;
2570 }
2571
2572 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
2573 extack);
2574 cls_flower.command = add ?
2575 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
2576 cls_flower.cookie = (unsigned long)f;
2577 cls_flower.rule->match.dissector = &f->mask->dissector;
2578 cls_flower.rule->match.mask = &f->mask->key;
2579 cls_flower.rule->match.key = &f->mkey;
2580
2581 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
2582 cls_flower.common.extack);
2583 if (err) {
2584 kfree(cls_flower.rule);
2585 if (tc_skip_sw(f->flags)) {
2586 __fl_put(f);
2587 return err;
2588 }
2589 goto next_flow;
2590 }
2591
2592 cls_flower.classid = f->res.classid;
2593
2594 err = tc_setup_cb_reoffload(block, tp, add, cb,
2595 TC_SETUP_CLSFLOWER, &cls_flower,
2596 cb_priv, &f->flags,
2597 &f->in_hw_count);
2598 tc_cleanup_offload_action(&cls_flower.rule->action);
2599 kfree(cls_flower.rule);
2600
2601 if (err) {
2602 __fl_put(f);
2603 return err;
2604 }
2605 next_flow:
2606 __fl_put(f);
2607 }
2608
2609 return 0;
2610 }
2611
2612 static void fl_hw_add(struct tcf_proto *tp, void *type_data)
2613 {
2614 struct flow_cls_offload *cls_flower = type_data;
2615 struct cls_fl_filter *f =
2616 (struct cls_fl_filter *) cls_flower->cookie;
2617 struct cls_fl_head *head = fl_head_dereference(tp);
2618
2619 spin_lock(&tp->lock);
2620 list_add(&f->hw_list, &head->hw_filters);
2621 spin_unlock(&tp->lock);
2622 }
2623
2624 static void fl_hw_del(struct tcf_proto *tp, void *type_data)
2625 {
2626 struct flow_cls_offload *cls_flower = type_data;
2627 struct cls_fl_filter *f =
2628 (struct cls_fl_filter *) cls_flower->cookie;
2629
2630 spin_lock(&tp->lock);
2631 if (!list_empty(&f->hw_list))
2632 list_del_init(&f->hw_list);
2633 spin_unlock(&tp->lock);
2634 }
2635
2636 static int fl_hw_create_tmplt(struct tcf_chain *chain,
2637 struct fl_flow_tmplt *tmplt)
2638 {
2639 struct flow_cls_offload cls_flower = {};
2640 struct tcf_block *block = chain->block;
2641
2642 cls_flower.rule = flow_rule_alloc(0);
2643 if (!cls_flower.rule)
2644 return -ENOMEM;
2645
2646 cls_flower.common.chain_index = chain->index;
2647 cls_flower.command = FLOW_CLS_TMPLT_CREATE;
2648 cls_flower.cookie = (unsigned long) tmplt;
2649 cls_flower.rule->match.dissector = &tmplt->dissector;
2650 cls_flower.rule->match.mask = &tmplt->mask;
2651 cls_flower.rule->match.key = &tmplt->dummy_key;
2652
2653 /* We don't care if a driver (any of them) fails to handle this
2654 * call. It serves only as a hint to the driver.
2655 */
2656 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2657 kfree(cls_flower.rule);
2658
2659 return 0;
2660 }
2661
2662 static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
2663 struct fl_flow_tmplt *tmplt)
2664 {
2665 struct flow_cls_offload cls_flower = {};
2666 struct tcf_block *block = chain->block;
2667
2668 cls_flower.common.chain_index = chain->index;
2669 cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
2670 cls_flower.cookie = (unsigned long) tmplt;
2671
2672 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true);
2673 }
2674
2675 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
2676 struct nlattr **tca,
2677 struct netlink_ext_ack *extack)
2678 {
2679 struct fl_flow_tmplt *tmplt;
2680 struct nlattr **tb;
2681 int err;
2682
2683 if (!tca[TCA_OPTIONS])
2684 return ERR_PTR(-EINVAL);
2685
2686 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
2687 if (!tb)
2688 return ERR_PTR(-ENOBUFS);
2689 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
2690 tca[TCA_OPTIONS], fl_policy, NULL);
2691 if (err)
2692 goto errout_tb;
2693
2694 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
2695 if (!tmplt) {
2696 err = -ENOMEM;
2697 goto errout_tb;
2698 }
2699 tmplt->chain = chain;
2700 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
2701 if (err)
2702 goto errout_tmplt;
2703
2704 fl_init_dissector(&tmplt->dissector, &tmplt->mask);
2705
2706 err = fl_hw_create_tmplt(chain, tmplt);
2707 if (err)
2708 goto errout_tmplt;
2709
2710 kfree(tb);
2711 return tmplt;
2712
2713 errout_tmplt:
2714 kfree(tmplt);
2715 errout_tb:
2716 kfree(tb);
2717 return ERR_PTR(err);
2718 }
2719
2720 static void fl_tmplt_destroy(void *tmplt_priv)
2721 {
2722 struct fl_flow_tmplt *tmplt = tmplt_priv;
2723
2724 fl_hw_destroy_tmplt(tmplt->chain, tmplt);
2725 kfree(tmplt);
2726 }
2727
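/* Replay the chain template towards a single offload callback, used when
 * a block binds or unbinds while a template exists. Failures are ignored
 * for the same reason as in fl_hw_create_tmplt(): the template is only a
 * hint for drivers.
 */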
2728 static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
2729 flow_setup_cb_t *cb, void *cb_priv)
2730 {
2731 struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
2732 struct flow_cls_offload cls_flower = {};
2733
2734 cls_flower.rule = flow_rule_alloc(0);
2735 if (!cls_flower.rule)
2736 return;
2737
2738 cls_flower.common.chain_index = chain->index;
2739 cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
2740 FLOW_CLS_TMPLT_DESTROY;
2741 cls_flower.cookie = (unsigned long) tmplt;
2742 cls_flower.rule->match.dissector = &tmplt->dissector;
2743 cls_flower.rule->match.mask = &tmplt->mask;
2744 cls_flower.rule->match.key = &tmplt->dummy_key;
2745
2746 cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
2747 kfree(cls_flower.rule);
2748 }
2749
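/* Dump one key/mask pair. A field whose mask is all zeroes was never set
 * and is omitted entirely; TCA_FLOWER_UNSPEC as mask_type marks
 * attributes that are exact-match only, so no mask attribute is emitted
 * for them.
 */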
2750 static int fl_dump_key_val(struct sk_buff *skb,
2751 void *val, int val_type,
2752 void *mask, int mask_type, int len)
2753 {
2754 int err;
2755
2756 if (!memchr_inv(mask, 0, len))
2757 return 0;
2758 err = nla_put(skb, val_type, len, val);
2759 if (err)
2760 return err;
2761 if (mask_type != TCA_FLOWER_UNSPEC) {
2762 err = nla_put(skb, mask_type, len, mask);
2763 if (err)
2764 return err;
2765 }
2766 return 0;
2767 }
2768
2769 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
2770 struct fl_flow_key *mask)
2771 {
2772 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst,
2773 TCA_FLOWER_KEY_PORT_DST_MIN,
2774 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC,
2775 sizeof(key->tp_range.tp_min.dst)) ||
2776 fl_dump_key_val(skb, &key->tp_range.tp_max.dst,
2777 TCA_FLOWER_KEY_PORT_DST_MAX,
2778 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC,
2779 sizeof(key->tp_range.tp_max.dst)) ||
2780 fl_dump_key_val(skb, &key->tp_range.tp_min.src,
2781 TCA_FLOWER_KEY_PORT_SRC_MIN,
2782 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC,
2783 sizeof(key->tp_range.tp_min.src)) ||
2784 fl_dump_key_val(skb, &key->tp_range.tp_max.src,
2785 TCA_FLOWER_KEY_PORT_SRC_MAX,
2786 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC,
2787 sizeof(key->tp_range.tp_max.src)))
2788 return -1;
2789
2790 return 0;
2791 }
2792
2793 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb,
2794 struct flow_dissector_key_mpls *mpls_key,
2795 struct flow_dissector_key_mpls *mpls_mask,
2796 u8 lse_index)
2797 {
2798 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index];
2799 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index];
2800 int err;
2801
2802 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH,
2803 lse_index + 1);
2804 if (err)
2805 return err;
2806
2807 if (lse_mask->mpls_ttl) {
2808 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL,
2809 lse_key->mpls_ttl);
2810 if (err)
2811 return err;
2812 }
2813 if (lse_mask->mpls_bos) {
2814 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS,
2815 lse_key->mpls_bos);
2816 if (err)
2817 return err;
2818 }
2819 if (lse_mask->mpls_tc) {
2820 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC,
2821 lse_key->mpls_tc);
2822 if (err)
2823 return err;
2824 }
2825 if (lse_mask->mpls_label) {
2826 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL,
2827 lse_key->mpls_label);
2828 if (err)
2829 return err;
2830 }
2831
2832 return 0;
2833 }
2834
2835 static int fl_dump_key_mpls_opts(struct sk_buff *skb,
2836 struct flow_dissector_key_mpls *mpls_key,
2837 struct flow_dissector_key_mpls *mpls_mask)
2838 {
2839 struct nlattr *opts;
2840 struct nlattr *lse;
2841 u8 lse_index;
2842 int err;
2843
2844 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS);
2845 if (!opts)
2846 return -EMSGSIZE;
2847
2848 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) {
2849 if (!(mpls_mask->used_lses & 1 << lse_index))
2850 continue;
2851
2852 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE);
2853 if (!lse) {
2854 err = -EMSGSIZE;
2855 goto err_opts;
2856 }
2857
2858 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask,
2859 lse_index);
2860 if (err)
2861 goto err_opts_lse;
2862 nla_nest_end(skb, lse);
2863 }
2864 nla_nest_end(skb, opts);
2865
2866 return 0;
2867
2868 err_opts_lse:
2869 nla_nest_cancel(skb, lse);
2870 err_opts:
2871 nla_nest_cancel(skb, opts);
2872
2873 return err;
2874 }
2875
2876 static int fl_dump_key_mpls(struct sk_buff *skb,
2877 struct flow_dissector_key_mpls *mpls_key,
2878 struct flow_dissector_key_mpls *mpls_mask)
2879 {
2880 struct flow_dissector_mpls_lse *lse_mask;
2881 struct flow_dissector_mpls_lse *lse_key;
2882 int err;
2883
2884 if (!mpls_mask->used_lses)
2885 return 0;
2886
2887 lse_mask = &mpls_mask->ls[0];
2888 lse_key = &mpls_key->ls[0];
2889
2890 /* For backward compatibility, don't use the MPLS nested attributes if
2891 * the rule can be expressed using the old attributes.
2892 */
2893 if (mpls_mask->used_lses & ~1 ||
2894 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos &&
2895 !lse_mask->mpls_tc && !lse_mask->mpls_label))
2896 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask);
2897
2898 if (lse_mask->mpls_ttl) {
2899 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
2900 lse_key->mpls_ttl);
2901 if (err)
2902 return err;
2903 }
2904 if (lse_mask->mpls_tc) {
2905 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
2906 lse_key->mpls_tc);
2907 if (err)
2908 return err;
2909 }
2910 if (lse_mask->mpls_label) {
2911 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
2912 lse_key->mpls_label);
2913 if (err)
2914 return err;
2915 }
2916 if (lse_mask->mpls_bos) {
2917 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
2918 lse_key->mpls_bos);
2919 if (err)
2920 return err;
2921 }
2922 return 0;
2923 }
2924
2925 static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
2926 struct flow_dissector_key_ip *key,
2927 struct flow_dissector_key_ip *mask)
2928 {
2929 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
2930 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
2931 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
2932 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;
2933
2934 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
2935 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
2936 return -1;
2937
2938 return 0;
2939 }
2940
2941 static int fl_dump_key_vlan(struct sk_buff *skb,
2942 int vlan_id_key, int vlan_prio_key,
2943 struct flow_dissector_key_vlan *vlan_key,
2944 struct flow_dissector_key_vlan *vlan_mask)
2945 {
2946 int err;
2947
2948 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
2949 return 0;
2950 if (vlan_mask->vlan_id) {
2951 err = nla_put_u16(skb, vlan_id_key,
2952 vlan_key->vlan_id);
2953 if (err)
2954 return err;
2955 }
2956 if (vlan_mask->vlan_priority) {
2957 err = nla_put_u8(skb, vlan_prio_key,
2958 vlan_key->vlan_priority);
2959 if (err)
2960 return err;
2961 }
2962 return 0;
2963 }
2964
2965 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
2966 u32 *flower_key, u32 *flower_mask,
2967 u32 flower_flag_bit, u32 dissector_flag_bit)
2968 {
2969 if (dissector_mask & dissector_flag_bit) {
2970 *flower_mask |= flower_flag_bit;
2971 if (dissector_key & dissector_flag_bit)
2972 *flower_key |= flower_flag_bit;
2973 }
2974 }
2975
2976 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
2977 {
2978 u32 key, mask;
2979 __be32 _key, _mask;
2980 int err;
2981
2982 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
2983 return 0;
2984
2985 key = 0;
2986 mask = 0;
2987
2988 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2989 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
2990 fl_get_key_flag(flags_key, flags_mask, &key, &mask,
2991 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
2992 FLOW_DIS_FIRST_FRAG);
2993
2994 _key = cpu_to_be32(key);
2995 _mask = cpu_to_be32(mask);
2996
2997 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
2998 if (err)
2999 return err;
3000
3001 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
3002 }
3003
3004 static int fl_dump_key_geneve_opt(struct sk_buff *skb,
3005 struct flow_dissector_key_enc_opts *enc_opts)
3006 {
3007 struct geneve_opt *opt;
3008 struct nlattr *nest;
3009 int opt_off = 0;
3010
3011 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
3012 if (!nest)
3013 goto nla_put_failure;
3014
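/* enc_opts->data holds back-to-back struct geneve_opt entries;
 * opt->length counts 4-byte words of option payload, hence the "* 4"
 * when emitting the data attribute and advancing opt_off.
 */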
3015 while (enc_opts->len > opt_off) {
3016 opt = (struct geneve_opt *)&enc_opts->data[opt_off];
3017
3018 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
3019 opt->opt_class))
3020 goto nla_put_failure;
3021 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
3022 opt->type))
3023 goto nla_put_failure;
3024 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
3025 opt->length * 4, opt->opt_data))
3026 goto nla_put_failure;
3027
3028 opt_off += sizeof(struct geneve_opt) + opt->length * 4;
3029 }
3030 nla_nest_end(skb, nest);
3031 return 0;
3032
3033 nla_put_failure:
3034 nla_nest_cancel(skb, nest);
3035 return -EMSGSIZE;
3036 }
3037
3038 static int fl_dump_key_vxlan_opt(struct sk_buff *skb,
3039 struct flow_dissector_key_enc_opts *enc_opts)
3040 {
3041 struct vxlan_metadata *md;
3042 struct nlattr *nest;
3043
3044 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN);
3045 if (!nest)
3046 goto nla_put_failure;
3047
3048 md = (struct vxlan_metadata *)&enc_opts->data[0];
3049 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp))
3050 goto nla_put_failure;
3051
3052 nla_nest_end(skb, nest);
3053 return 0;
3054
3055 nla_put_failure:
3056 nla_nest_cancel(skb, nest);
3057 return -EMSGSIZE;
3058 }
3059
3060 static int fl_dump_key_erspan_opt(struct sk_buff *skb,
3061 struct flow_dissector_key_enc_opts *enc_opts)
3062 {
3063 struct erspan_metadata *md;
3064 struct nlattr *nest;
3065
3066 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN);
3067 if (!nest)
3068 goto nla_put_failure;
3069
3070 md = (struct erspan_metadata *)&enc_opts->data[0];
3071 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version))
3072 goto nla_put_failure;
3073
3074 if (md->version == 1 &&
3075 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index))
3076 goto nla_put_failure;
3077
3078 if (md->version == 2 &&
3079 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR,
3080 md->u.md2.dir) ||
3081 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID,
3082 get_hwid(&md->u.md2))))
3083 goto nla_put_failure;
3084
3085 nla_nest_end(skb, nest);
3086 return 0;
3087
3088 nla_put_failure:
3089 nla_nest_cancel(skb, nest);
3090 return -EMSGSIZE;
3091 }
3092
3093 static int fl_dump_key_gtp_opt(struct sk_buff *skb,
3094 struct flow_dissector_key_enc_opts *enc_opts)
3095
3096 {
3097 struct gtp_pdu_session_info *session_info;
3098 struct nlattr *nest;
3099
3100 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP);
3101 if (!nest)
3102 goto nla_put_failure;
3103
3104 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0];
3105
3106 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE,
3107 session_info->pdu_type))
3108 goto nla_put_failure;
3109
3110 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi))
3111 goto nla_put_failure;
3112
3113 nla_nest_end(skb, nest);
3114 return 0;
3115
3116 nla_put_failure:
3117 nla_nest_cancel(skb, nest);
3118 return -EMSGSIZE;
3119 }
3120
3121 static int fl_dump_key_ct(struct sk_buff *skb,
3122 struct flow_dissector_key_ct *key,
3123 struct flow_dissector_key_ct *mask)
3124 {
3125 if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
3126 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
3127 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
3128 sizeof(key->ct_state)))
3129 goto nla_put_failure;
3130
3131 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
3132 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
3133 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
3134 sizeof(key->ct_zone)))
3135 goto nla_put_failure;
3136
3137 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
3138 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
3139 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
3140 sizeof(key->ct_mark)))
3141 goto nla_put_failure;
3142
3143 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
3144 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
3145 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
3146 sizeof(key->ct_labels)))
3147 goto nla_put_failure;
3148
3149 return 0;
3150
3151 nla_put_failure:
3152 return -EMSGSIZE;
3153 }
3154
3155 static int fl_dump_key_cfm(struct sk_buff *skb,
3156 struct flow_dissector_key_cfm *key,
3157 struct flow_dissector_key_cfm *mask)
3158 {
3159 struct nlattr *opts;
3160 int err;
3161 u8 mdl;
3162
3163 if (!memchr_inv(mask, 0, sizeof(*mask)))
3164 return 0;
3165
3166 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM);
3167 if (!opts)
3168 return -EMSGSIZE;
3169
3170 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) {
3171 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver);
3172 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl);
3173 if (err)
3174 goto err_cfm_opts;
3175 }
3176
3177 if (mask->opcode) {
3178 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, key->opcode);
3179 if (err)
3180 goto err_cfm_opts;
3181 }
3182
3183 nla_nest_end(skb, opts);
3184
3185 return 0;
3186
3187 err_cfm_opts:
3188 nla_nest_cancel(skb, opts);
3189 return err;
3190 }
3191
3192 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
3193 struct flow_dissector_key_enc_opts *enc_opts)
3194 {
3195 struct nlattr *nest;
3196 int err;
3197
3198 if (!enc_opts->len)
3199 return 0;
3200
3201 nest = nla_nest_start_noflag(skb, enc_opt_type);
3202 if (!nest)
3203 goto nla_put_failure;
3204
3205 switch (enc_opts->dst_opt_type) {
3206 case TUNNEL_GENEVE_OPT:
3207 err = fl_dump_key_geneve_opt(skb, enc_opts);
3208 if (err)
3209 goto nla_put_failure;
3210 break;
3211 case TUNNEL_VXLAN_OPT:
3212 err = fl_dump_key_vxlan_opt(skb, enc_opts);
3213 if (err)
3214 goto nla_put_failure;
3215 break;
3216 case TUNNEL_ERSPAN_OPT:
3217 err = fl_dump_key_erspan_opt(skb, enc_opts);
3218 if (err)
3219 goto nla_put_failure;
3220 break;
3221 case TUNNEL_GTP_OPT:
3222 err = fl_dump_key_gtp_opt(skb, enc_opts);
3223 if (err)
3224 goto nla_put_failure;
3225 break;
3226 default:
3227 goto nla_put_failure;
3228 }
3229 nla_nest_end(skb, nest);
3230 return 0;
3231
3232 nla_put_failure:
3233 nla_nest_cancel(skb, nest);
3234 return -EMSGSIZE;
3235 }
3236
3237 static int fl_dump_key_enc_opt(struct sk_buff *skb,
3238 struct flow_dissector_key_enc_opts *key_opts,
3239 struct flow_dissector_key_enc_opts *msk_opts)
3240 {
3241 int err;
3242
3243 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
3244 if (err)
3245 return err;
3246
3247 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
3248 }
3249
3250 static int fl_dump_key(struct sk_buff *skb, struct net *net,
3251 struct fl_flow_key *key, struct fl_flow_key *mask)
3252 {
3253 if (mask->meta.ingress_ifindex) {
3254 struct net_device *dev;
3255
3256 dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
3257 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
3258 goto nla_put_failure;
3259 }
3260
3261 if (fl_dump_key_val(skb, &key->meta.l2_miss,
3262 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss,
3263 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss)))
3264 goto nla_put_failure;
3265
3266 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
3267 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
3268 sizeof(key->eth.dst)) ||
3269 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
3270 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
3271 sizeof(key->eth.src)) ||
3272 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
3273 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
3274 sizeof(key->basic.n_proto)))
3275 goto nla_put_failure;
3276
3277 if (mask->num_of_vlans.num_of_vlans) {
3278 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
3279 goto nla_put_failure;
3280 }
3281
3282 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
3283 goto nla_put_failure;
3284
3285 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
3286 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
3287 goto nla_put_failure;
3288
3289 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
3290 TCA_FLOWER_KEY_CVLAN_PRIO,
3291 &key->cvlan, &mask->cvlan) ||
3292 (mask->cvlan.vlan_tpid &&
3293 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3294 key->cvlan.vlan_tpid)))
3295 goto nla_put_failure;
3296
3297 if (mask->basic.n_proto) {
3298 if (mask->cvlan.vlan_eth_type) {
3299 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
3300 key->basic.n_proto))
3301 goto nla_put_failure;
3302 } else if (mask->vlan.vlan_eth_type) {
3303 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
3304 key->vlan.vlan_eth_type))
3305 goto nla_put_failure;
3306 }
3307 }
3308
3309 if ((key->basic.n_proto == htons(ETH_P_IP) ||
3310 key->basic.n_proto == htons(ETH_P_IPV6)) &&
3311 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
3312 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
3313 sizeof(key->basic.ip_proto)) ||
3314 fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
3315 goto nla_put_failure;
3316
3317 if (mask->pppoe.session_id) {
3318 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID,
3319 key->pppoe.session_id))
3320 goto nla_put_failure;
3321 }
3322 if (mask->basic.n_proto && mask->pppoe.ppp_proto) {
3323 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO,
3324 key->pppoe.ppp_proto))
3325 goto nla_put_failure;
3326 }
3327
3328 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3329 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
3330 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
3331 sizeof(key->ipv4.src)) ||
3332 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
3333 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
3334 sizeof(key->ipv4.dst))))
3335 goto nla_put_failure;
3336 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3337 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
3338 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
3339 sizeof(key->ipv6.src)) ||
3340 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
3341 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
3342 sizeof(key->ipv6.dst))))
3343 goto nla_put_failure;
3344
3345 if (key->basic.ip_proto == IPPROTO_TCP &&
3346 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
3347 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
3348 sizeof(key->tp.src)) ||
3349 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
3350 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
3351 sizeof(key->tp.dst)) ||
3352 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
3353 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
3354 sizeof(key->tcp.flags))))
3355 goto nla_put_failure;
3356 else if (key->basic.ip_proto == IPPROTO_UDP &&
3357 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
3358 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
3359 sizeof(key->tp.src)) ||
3360 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
3361 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
3362 sizeof(key->tp.dst))))
3363 goto nla_put_failure;
3364 else if (key->basic.ip_proto == IPPROTO_SCTP &&
3365 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
3366 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
3367 sizeof(key->tp.src)) ||
3368 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
3369 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
3370 sizeof(key->tp.dst))))
3371 goto nla_put_failure;
3372 else if (key->basic.n_proto == htons(ETH_P_IP) &&
3373 key->basic.ip_proto == IPPROTO_ICMP &&
3374 (fl_dump_key_val(skb, &key->icmp.type,
3375 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
3376 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
3377 sizeof(key->icmp.type)) ||
3378 fl_dump_key_val(skb, &key->icmp.code,
3379 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
3380 TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
3381 sizeof(key->icmp.code))))
3382 goto nla_put_failure;
3383 else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
3384 key->basic.ip_proto == IPPROTO_ICMPV6 &&
3385 (fl_dump_key_val(skb, &key->icmp.type,
3386 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
3387 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
3388 sizeof(key->icmp.type)) ||
3389 fl_dump_key_val(skb, &key->icmp.code,
3390 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
3391 TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
3392 sizeof(key->icmp.code))))
3393 goto nla_put_failure;
3394 else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
3395 key->basic.n_proto == htons(ETH_P_RARP)) &&
3396 (fl_dump_key_val(skb, &key->arp.sip,
3397 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
3398 TCA_FLOWER_KEY_ARP_SIP_MASK,
3399 sizeof(key->arp.sip)) ||
3400 fl_dump_key_val(skb, &key->arp.tip,
3401 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
3402 TCA_FLOWER_KEY_ARP_TIP_MASK,
3403 sizeof(key->arp.tip)) ||
3404 fl_dump_key_val(skb, &key->arp.op,
3405 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
3406 TCA_FLOWER_KEY_ARP_OP_MASK,
3407 sizeof(key->arp.op)) ||
3408 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
3409 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
3410 sizeof(key->arp.sha)) ||
3411 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
3412 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
3413 sizeof(key->arp.tha))))
3414 goto nla_put_failure;
3415 else if (key->basic.ip_proto == IPPROTO_L2TP &&
3416 fl_dump_key_val(skb, &key->l2tpv3.session_id,
3417 TCA_FLOWER_KEY_L2TPV3_SID,
3418 &mask->l2tpv3.session_id,
3419 TCA_FLOWER_UNSPEC,
3420 sizeof(key->l2tpv3.session_id)))
3421 goto nla_put_failure;
3422
3423 if (key->ipsec.spi &&
3424 fl_dump_key_val(skb, &key->ipsec.spi, TCA_FLOWER_KEY_SPI,
3425 &mask->ipsec.spi, TCA_FLOWER_KEY_SPI_MASK,
3426 sizeof(key->ipsec.spi)))
3427 goto nla_put_failure;
3428
3429 if ((key->basic.ip_proto == IPPROTO_TCP ||
3430 key->basic.ip_proto == IPPROTO_UDP ||
3431 key->basic.ip_proto == IPPROTO_SCTP) &&
3432 fl_dump_key_port_range(skb, key, mask))
3433 goto nla_put_failure;
3434
3435 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
3436 (fl_dump_key_val(skb, &key->enc_ipv4.src,
3437 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
3438 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
3439 sizeof(key->enc_ipv4.src)) ||
3440 fl_dump_key_val(skb, &key->enc_ipv4.dst,
3441 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
3442 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
3443 sizeof(key->enc_ipv4.dst))))
3444 goto nla_put_failure;
3445 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
3446 (fl_dump_key_val(skb, &key->enc_ipv6.src,
3447 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
3448 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
3449 sizeof(key->enc_ipv6.src)) ||
3450 fl_dump_key_val(skb, &key->enc_ipv6.dst,
3451 TCA_FLOWER_KEY_ENC_IPV6_DST,
3452 &mask->enc_ipv6.dst,
3453 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
3454 sizeof(key->enc_ipv6.dst))))
3455 goto nla_put_failure;
3456
3457 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
3458 &mask->enc_key_id, TCA_FLOWER_UNSPEC,
3459 sizeof(key->enc_key_id)) ||
3460 fl_dump_key_val(skb, &key->enc_tp.src,
3461 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
3462 &mask->enc_tp.src,
3463 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
3464 sizeof(key->enc_tp.src)) ||
3465 fl_dump_key_val(skb, &key->enc_tp.dst,
3466 TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
3467 &mask->enc_tp.dst,
3468 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
3469 sizeof(key->enc_tp.dst)) ||
3470 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
3471 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
3472 goto nla_put_failure;
3473
3474 if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
3475 goto nla_put_failure;
3476
3477 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
3478 goto nla_put_failure;
3479
3480 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
3481 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
3482 sizeof(key->hash.hash)))
3483 goto nla_put_failure;
3484
3485 if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm))
3486 goto nla_put_failure;
3487
3488 return 0;
3489
3490 nla_put_failure:
3491 return -EMSGSIZE;
3492 }
3493
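/*
 * Full dump of a single filter. The classid, key/mask and flags are read
 * under tp->lock so a concurrent fl_change() cannot be observed
 * half-applied; hardware stats and the extended actions are dumped after
 * the lock is dropped, since those paths may sleep.
 */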
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

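/*
 * Reduced dump used when userspace requests a terse listing (via
 * TCA_DUMP_FLAGS_TERSE): only the handle, flags and a terse view of the
 * actions are emitted; the flow key/mask is skipped entirely.
 */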
static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh,
			 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	skip_hw = tc_skip_hw(f->flags);

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (tcf_exts_terse_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

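/*
 * Dump a chain template: only the masked key (no handle, flags or
 * actions) is emitted, reusing fl_dump_key() with the template's dummy
 * key and mask.
 */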
static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

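/*
 * Update the filter's cached class reference via tc_cls_bind_class();
 * the core invokes this when the class identified by classid is bound
 * or unbound.
 */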
static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct cls_fl_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}

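/*
 * Report whether this classifier instance holds no filters. Setting
 * tp->deleting under tp->lock lets the core safely tear down a flower
 * instance that has become empty.
 */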
static bool fl_delete_empty(struct tcf_proto *tp)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	tp->deleting = idr_is_empty(&head->handle_idr);
	spin_unlock(&tp->lock);

	return tp->deleting;
}

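/*
 * TCF_PROTO_OPS_DOIT_UNLOCKED declares that flower does its own locking
 * (tp->lock plus RCU) and does not require the RTNL mutex for filter
 * add/change/delete. For illustration only, a filter serviced by these
 * ops is typically created from userspace with something like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */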
static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.delete_empty	= fl_delete_empty,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.hw_add		= fl_hw_add,
	.hw_del		= fl_hw_del,
	.dump		= fl_dump,
	.terse_dump	= fl_terse_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_reoffload = fl_tmplt_reoffload,
	.tmplt_dump	= fl_tmplt_dump,
	.get_exts	= fl_get_exts,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

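/* Register the flower classifier with the TC core on module load. */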
static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");