1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_flower.c Flower classifier 4 * 5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/rhashtable.h> 12 #include <linux/workqueue.h> 13 #include <linux/refcount.h> 14 #include <linux/bitfield.h> 15 16 #include <linux/if_ether.h> 17 #include <linux/in6.h> 18 #include <linux/ip.h> 19 #include <linux/mpls.h> 20 #include <linux/ppp_defs.h> 21 22 #include <net/sch_generic.h> 23 #include <net/pkt_cls.h> 24 #include <net/pkt_sched.h> 25 #include <net/ip.h> 26 #include <net/flow_dissector.h> 27 #include <net/geneve.h> 28 #include <net/vxlan.h> 29 #include <net/erspan.h> 30 #include <net/gtp.h> 31 #include <net/tc_wrapper.h> 32 33 #include <net/dst.h> 34 #include <net/dst_metadata.h> 35 36 #include <uapi/linux/netfilter/nf_conntrack_common.h> 37 38 #define TCA_FLOWER_KEY_CT_FLAGS_MAX \ 39 ((__TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) << 1) 40 #define TCA_FLOWER_KEY_CT_FLAGS_MASK \ 41 (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) 42 43 struct fl_flow_key { 44 struct flow_dissector_key_meta meta; 45 struct flow_dissector_key_control control; 46 struct flow_dissector_key_control enc_control; 47 struct flow_dissector_key_basic basic; 48 struct flow_dissector_key_eth_addrs eth; 49 struct flow_dissector_key_vlan vlan; 50 struct flow_dissector_key_vlan cvlan; 51 union { 52 struct flow_dissector_key_ipv4_addrs ipv4; 53 struct flow_dissector_key_ipv6_addrs ipv6; 54 }; 55 struct flow_dissector_key_ports tp; 56 struct flow_dissector_key_icmp icmp; 57 struct flow_dissector_key_arp arp; 58 struct flow_dissector_key_keyid enc_key_id; 59 union { 60 struct flow_dissector_key_ipv4_addrs enc_ipv4; 61 struct flow_dissector_key_ipv6_addrs enc_ipv6; 62 }; 63 struct flow_dissector_key_ports enc_tp; 64 struct flow_dissector_key_mpls mpls; 65 struct flow_dissector_key_tcp tcp; 66 struct flow_dissector_key_ip ip; 67 struct flow_dissector_key_ip enc_ip; 68 struct flow_dissector_key_enc_opts enc_opts; 69 struct flow_dissector_key_ports_range tp_range; 70 struct flow_dissector_key_ct ct; 71 struct flow_dissector_key_hash hash; 72 struct flow_dissector_key_num_of_vlans num_of_vlans; 73 struct flow_dissector_key_pppoe pppoe; 74 struct flow_dissector_key_l2tpv3 l2tpv3; 75 struct flow_dissector_key_cfm cfm; 76 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. 
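 *
 * The alignment matters because fl_mask_update_range() below rounds the
 * used portion of a mask out to sizeof(long) boundaries and
 * fl_set_masked_key() then ANDs key and mask one long at a time,
 * conceptually (illustrative sketch only):
 *
 *	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
 *		*lmkey++ = *lkey++ & *lmask++;
 *
 * Padding the structure to a multiple of BITS_PER_LONG / 8 guarantees the
 * rounded-up range never extends past the end of the key.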
*/ 77 78 struct fl_flow_mask_range { 79 unsigned short int start; 80 unsigned short int end; 81 }; 82 83 struct fl_flow_mask { 84 struct fl_flow_key key; 85 struct fl_flow_mask_range range; 86 u32 flags; 87 struct rhash_head ht_node; 88 struct rhashtable ht; 89 struct rhashtable_params filter_ht_params; 90 struct flow_dissector dissector; 91 struct list_head filters; 92 struct rcu_work rwork; 93 struct list_head list; 94 refcount_t refcnt; 95 }; 96 97 struct fl_flow_tmplt { 98 struct fl_flow_key dummy_key; 99 struct fl_flow_key mask; 100 struct flow_dissector dissector; 101 struct tcf_chain *chain; 102 }; 103 104 struct cls_fl_head { 105 struct rhashtable ht; 106 spinlock_t masks_lock; /* Protect masks list */ 107 struct list_head masks; 108 struct list_head hw_filters; 109 struct rcu_work rwork; 110 struct idr handle_idr; 111 }; 112 113 struct cls_fl_filter { 114 struct fl_flow_mask *mask; 115 struct rhash_head ht_node; 116 struct fl_flow_key mkey; 117 struct tcf_exts exts; 118 struct tcf_result res; 119 struct fl_flow_key key; 120 struct list_head list; 121 struct list_head hw_list; 122 u32 handle; 123 u32 flags; 124 u32 in_hw_count; 125 u8 needs_tc_skb_ext:1; 126 struct rcu_work rwork; 127 struct net_device *hw_dev; 128 /* Flower classifier is unlocked, which means that its reference counter 129 * can be changed concurrently without any kind of external 130 * synchronization. Use atomic reference counter to be concurrency-safe. 131 */ 132 refcount_t refcnt; 133 bool deleted; 134 }; 135 136 static const struct rhashtable_params mask_ht_params = { 137 .key_offset = offsetof(struct fl_flow_mask, key), 138 .key_len = sizeof(struct fl_flow_key), 139 .head_offset = offsetof(struct fl_flow_mask, ht_node), 140 .automatic_shrinking = true, 141 }; 142 143 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 144 { 145 return mask->range.end - mask->range.start; 146 } 147 148 static void fl_mask_update_range(struct fl_flow_mask *mask) 149 { 150 const u8 *bytes = (const u8 *) &mask->key; 151 size_t size = sizeof(mask->key); 152 size_t i, first = 0, last; 153 154 for (i = 0; i < size; i++) { 155 if (bytes[i]) { 156 first = i; 157 break; 158 } 159 } 160 last = first; 161 for (i = size - 1; i != first; i--) { 162 if (bytes[i]) { 163 last = i; 164 break; 165 } 166 } 167 mask->range.start = rounddown(first, sizeof(long)); 168 mask->range.end = roundup(last + 1, sizeof(long)); 169 } 170 171 static void *fl_key_get_start(struct fl_flow_key *key, 172 const struct fl_flow_mask *mask) 173 { 174 return (u8 *) key + mask->range.start; 175 } 176 177 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, 178 struct fl_flow_mask *mask) 179 { 180 const long *lkey = fl_key_get_start(key, mask); 181 const long *lmask = fl_key_get_start(&mask->key, mask); 182 long *lmkey = fl_key_get_start(mkey, mask); 183 int i; 184 185 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) 186 *lmkey++ = *lkey++ & *lmask++; 187 } 188 189 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, 190 struct fl_flow_mask *mask) 191 { 192 const long *lmask = fl_key_get_start(&mask->key, mask); 193 const long *ltmplt; 194 int i; 195 196 if (!tmplt) 197 return true; 198 ltmplt = fl_key_get_start(&tmplt->mask, mask); 199 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { 200 if (~*ltmplt++ & *lmask++) 201 return false; 202 } 203 return true; 204 } 205 206 static void fl_clear_masked_range(struct fl_flow_key *key, 207 struct fl_flow_mask *mask) 208 { 209 memset(fl_key_get_start(key, 
mask), 0, fl_mask_range(mask)); 210 } 211 212 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, 213 struct fl_flow_key *key, 214 struct fl_flow_key *mkey) 215 { 216 u16 min_mask, max_mask, min_val, max_val; 217 218 min_mask = ntohs(filter->mask->key.tp_range.tp_min.dst); 219 max_mask = ntohs(filter->mask->key.tp_range.tp_max.dst); 220 min_val = ntohs(filter->key.tp_range.tp_min.dst); 221 max_val = ntohs(filter->key.tp_range.tp_max.dst); 222 223 if (min_mask && max_mask) { 224 if (ntohs(key->tp_range.tp.dst) < min_val || 225 ntohs(key->tp_range.tp.dst) > max_val) 226 return false; 227 228 /* skb does not have min and max values */ 229 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 230 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 231 } 232 return true; 233 } 234 235 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, 236 struct fl_flow_key *key, 237 struct fl_flow_key *mkey) 238 { 239 u16 min_mask, max_mask, min_val, max_val; 240 241 min_mask = ntohs(filter->mask->key.tp_range.tp_min.src); 242 max_mask = ntohs(filter->mask->key.tp_range.tp_max.src); 243 min_val = ntohs(filter->key.tp_range.tp_min.src); 244 max_val = ntohs(filter->key.tp_range.tp_max.src); 245 246 if (min_mask && max_mask) { 247 if (ntohs(key->tp_range.tp.src) < min_val || 248 ntohs(key->tp_range.tp.src) > max_val) 249 return false; 250 251 /* skb does not have min and max values */ 252 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 253 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 254 } 255 return true; 256 } 257 258 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, 259 struct fl_flow_key *mkey) 260 { 261 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), 262 mask->filter_ht_params); 263 } 264 265 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, 266 struct fl_flow_key *mkey, 267 struct fl_flow_key *key) 268 { 269 struct cls_fl_filter *filter, *f; 270 271 list_for_each_entry_rcu(filter, &mask->filters, list) { 272 if (!fl_range_port_dst_cmp(filter, key, mkey)) 273 continue; 274 275 if (!fl_range_port_src_cmp(filter, key, mkey)) 276 continue; 277 278 f = __fl_lookup(mask, mkey); 279 if (f) 280 return f; 281 } 282 return NULL; 283 } 284 285 static noinline_for_stack 286 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) 287 { 288 struct fl_flow_key mkey; 289 290 fl_set_masked_key(&mkey, key, mask); 291 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) 292 return fl_lookup_range(mask, &mkey, key); 293 294 return __fl_lookup(mask, &mkey); 295 } 296 297 static u16 fl_ct_info_to_flower_map[] = { 298 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 299 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 300 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 301 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 302 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 303 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED | 304 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 305 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 306 TCA_FLOWER_KEY_CT_FLAGS_RELATED | 307 TCA_FLOWER_KEY_CT_FLAGS_REPLY, 308 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 309 TCA_FLOWER_KEY_CT_FLAGS_NEW, 310 }; 311 312 TC_INDIRECT_SCOPE int fl_classify(struct sk_buff *skb, 313 const struct tcf_proto *tp, 314 struct tcf_result *res) 315 { 316 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 317 bool post_ct = tc_skb_cb(skb)->post_ct; 318 u16 zone = tc_skb_cb(skb)->zone; 319 struct fl_flow_key skb_key; 
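	/* Classification sketch: flower keeps one hash table per mask. For
	 * each mask on head->masks the relevant packet fields are dissected
	 * into skb_key, ANDed with the mask and looked up in that mask's
	 * table; the first match that is not skip_sw terminates the walk.
	 * Lookups within one mask are therefore hash lookups, while every
	 * additional distinct mask adds another pass over the packet.
	 *
	 * An illustrative (hypothetical) user-space rule that exercises this
	 * path could be:
	 *
	 *	tc filter add dev eth0 ingress protocol ip flower \
	 *		ip_proto tcp dst_port 80 action drop
	 */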
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so do it here instead.
		 */
		skb_key.basic.n_proto = skb_protocol(skb, false);
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map),
				    post_ct, zone);
		skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
		skb_flow_dissect(skb, &mask->dissector, &skb_key,
				 FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);

		f = fl_mask_lookup(mask, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
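	 *
	 * In other words, no RCU read-side critical section is needed here:
	 * the caller's reference on the tcf_proto keeps tp->root alive,
	 * which is why plain rcu_dereference_raw() is sufficient below.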
415 */ 416 return rcu_dereference_raw(tp->root); 417 } 418 419 static void __fl_destroy_filter(struct cls_fl_filter *f) 420 { 421 if (f->needs_tc_skb_ext) 422 tc_skb_ext_tc_disable(); 423 tcf_exts_destroy(&f->exts); 424 tcf_exts_put_net(&f->exts); 425 kfree(f); 426 } 427 428 static void fl_destroy_filter_work(struct work_struct *work) 429 { 430 struct cls_fl_filter *f = container_of(to_rcu_work(work), 431 struct cls_fl_filter, rwork); 432 433 __fl_destroy_filter(f); 434 } 435 436 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, 437 bool rtnl_held, struct netlink_ext_ack *extack) 438 { 439 struct tcf_block *block = tp->chain->block; 440 struct flow_cls_offload cls_flower = {}; 441 442 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 443 cls_flower.command = FLOW_CLS_DESTROY; 444 cls_flower.cookie = (unsigned long) f; 445 446 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, 447 &f->flags, &f->in_hw_count, rtnl_held); 448 449 } 450 451 static int fl_hw_replace_filter(struct tcf_proto *tp, 452 struct cls_fl_filter *f, bool rtnl_held, 453 struct netlink_ext_ack *extack) 454 { 455 struct tcf_block *block = tp->chain->block; 456 struct flow_cls_offload cls_flower = {}; 457 bool skip_sw = tc_skip_sw(f->flags); 458 int err = 0; 459 460 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 461 if (!cls_flower.rule) 462 return -ENOMEM; 463 464 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 465 cls_flower.command = FLOW_CLS_REPLACE; 466 cls_flower.cookie = (unsigned long) f; 467 cls_flower.rule->match.dissector = &f->mask->dissector; 468 cls_flower.rule->match.mask = &f->mask->key; 469 cls_flower.rule->match.key = &f->mkey; 470 cls_flower.classid = f->res.classid; 471 472 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 473 cls_flower.common.extack); 474 if (err) { 475 kfree(cls_flower.rule); 476 477 return skip_sw ? 
err : 0; 478 } 479 480 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, 481 skip_sw, &f->flags, &f->in_hw_count, rtnl_held); 482 tc_cleanup_offload_action(&cls_flower.rule->action); 483 kfree(cls_flower.rule); 484 485 if (err) { 486 fl_hw_destroy_filter(tp, f, rtnl_held, NULL); 487 return err; 488 } 489 490 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) 491 return -EINVAL; 492 493 return 0; 494 } 495 496 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, 497 bool rtnl_held) 498 { 499 struct tcf_block *block = tp->chain->block; 500 struct flow_cls_offload cls_flower = {}; 501 502 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); 503 cls_flower.command = FLOW_CLS_STATS; 504 cls_flower.cookie = (unsigned long) f; 505 cls_flower.classid = f->res.classid; 506 507 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, 508 rtnl_held); 509 510 tcf_exts_hw_stats_update(&f->exts, &cls_flower.stats, cls_flower.use_act_stats); 511 } 512 513 static void __fl_put(struct cls_fl_filter *f) 514 { 515 if (!refcount_dec_and_test(&f->refcnt)) 516 return; 517 518 if (tcf_exts_get_net(&f->exts)) 519 tcf_queue_work(&f->rwork, fl_destroy_filter_work); 520 else 521 __fl_destroy_filter(f); 522 } 523 524 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) 525 { 526 struct cls_fl_filter *f; 527 528 rcu_read_lock(); 529 f = idr_find(&head->handle_idr, handle); 530 if (f && !refcount_inc_not_zero(&f->refcnt)) 531 f = NULL; 532 rcu_read_unlock(); 533 534 return f; 535 } 536 537 static struct tcf_exts *fl_get_exts(const struct tcf_proto *tp, u32 handle) 538 { 539 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 540 struct cls_fl_filter *f; 541 542 f = idr_find(&head->handle_idr, handle); 543 return f ? 
&f->exts : NULL; 544 } 545 546 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 547 bool *last, bool rtnl_held, 548 struct netlink_ext_ack *extack) 549 { 550 struct cls_fl_head *head = fl_head_dereference(tp); 551 552 *last = false; 553 554 spin_lock(&tp->lock); 555 if (f->deleted) { 556 spin_unlock(&tp->lock); 557 return -ENOENT; 558 } 559 560 f->deleted = true; 561 rhashtable_remove_fast(&f->mask->ht, &f->ht_node, 562 f->mask->filter_ht_params); 563 idr_remove(&head->handle_idr, f->handle); 564 list_del_rcu(&f->list); 565 spin_unlock(&tp->lock); 566 567 *last = fl_mask_put(head, f->mask); 568 if (!tc_skip_hw(f->flags)) 569 fl_hw_destroy_filter(tp, f, rtnl_held, extack); 570 tcf_unbind_filter(tp, &f->res); 571 __fl_put(f); 572 573 return 0; 574 } 575 576 static void fl_destroy_sleepable(struct work_struct *work) 577 { 578 struct cls_fl_head *head = container_of(to_rcu_work(work), 579 struct cls_fl_head, 580 rwork); 581 582 rhashtable_destroy(&head->ht); 583 kfree(head); 584 module_put(THIS_MODULE); 585 } 586 587 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, 588 struct netlink_ext_ack *extack) 589 { 590 struct cls_fl_head *head = fl_head_dereference(tp); 591 struct fl_flow_mask *mask, *next_mask; 592 struct cls_fl_filter *f, *next; 593 bool last; 594 595 list_for_each_entry_safe(mask, next_mask, &head->masks, list) { 596 list_for_each_entry_safe(f, next, &mask->filters, list) { 597 __fl_delete(tp, f, &last, rtnl_held, extack); 598 if (last) 599 break; 600 } 601 } 602 idr_destroy(&head->handle_idr); 603 604 __module_get(THIS_MODULE); 605 tcf_queue_work(&head->rwork, fl_destroy_sleepable); 606 } 607 608 static void fl_put(struct tcf_proto *tp, void *arg) 609 { 610 struct cls_fl_filter *f = arg; 611 612 __fl_put(f); 613 } 614 615 static void *fl_get(struct tcf_proto *tp, u32 handle) 616 { 617 struct cls_fl_head *head = fl_head_dereference(tp); 618 619 return __fl_get(head, handle); 620 } 621 622 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { 623 [TCA_FLOWER_UNSPEC] = { .strict_start_type = 624 TCA_FLOWER_L2_MISS }, 625 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, 626 [TCA_FLOWER_INDEV] = { .type = NLA_STRING, 627 .len = IFNAMSIZ }, 628 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, 629 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, 630 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, 631 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, 632 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, 633 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, 634 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, 635 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, 636 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, 637 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, 638 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 639 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 640 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 641 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 642 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 643 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 644 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 645 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 646 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, 647 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, 648 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, 649 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, 650 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, 651 
[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, 652 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, 653 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, 654 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 655 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 656 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 657 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 658 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, 659 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, 660 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 661 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, 662 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, 663 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, 664 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, 665 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, 666 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, 667 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, 668 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, 669 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, 670 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, 671 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, 672 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, 673 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, 674 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, 675 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, 676 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, 677 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, 678 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, 679 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, 680 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, 681 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, 682 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, 683 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, 684 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, 685 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, 686 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, 687 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, 688 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, 689 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, 690 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, 691 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 692 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 693 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 694 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 695 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 696 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 697 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, 698 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, 699 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, 700 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, 701 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, 702 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, 703 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, 704 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, 705 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, 706 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, 707 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, 708 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, 709 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, 710 [TCA_FLOWER_KEY_CT_STATE] = 711 NLA_POLICY_MASK(NLA_U16, TCA_FLOWER_KEY_CT_FLAGS_MASK), 712 [TCA_FLOWER_KEY_CT_STATE_MASK] = 713 NLA_POLICY_MASK(NLA_U16, 
TCA_FLOWER_KEY_CT_FLAGS_MASK), 714 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, 715 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, 716 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, 717 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, 718 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, 719 .len = 128 / BITS_PER_BYTE }, 720 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, 721 .len = 128 / BITS_PER_BYTE }, 722 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, 723 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, 724 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, 725 [TCA_FLOWER_KEY_NUM_OF_VLANS] = { .type = NLA_U8 }, 726 [TCA_FLOWER_KEY_PPPOE_SID] = { .type = NLA_U16 }, 727 [TCA_FLOWER_KEY_PPP_PROTO] = { .type = NLA_U16 }, 728 [TCA_FLOWER_KEY_L2TPV3_SID] = { .type = NLA_U32 }, 729 [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), 730 [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED }, 731 }; 732 733 static const struct nla_policy 734 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { 735 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { 736 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, 737 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, 738 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 739 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 740 [TCA_FLOWER_KEY_ENC_OPTS_GTP] = { .type = NLA_NESTED }, 741 }; 742 743 static const struct nla_policy 744 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { 745 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, 746 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, 747 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, 748 .len = 128 }, 749 }; 750 751 static const struct nla_policy 752 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { 753 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, 754 }; 755 756 static const struct nla_policy 757 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { 758 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, 759 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, 760 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, 761 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 762 }; 763 764 static const struct nla_policy 765 gtp_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1] = { 766 [TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] = { .type = NLA_U8 }, 767 [TCA_FLOWER_KEY_ENC_OPT_GTP_QFI] = { .type = NLA_U8 }, 768 }; 769 770 static const struct nla_policy 771 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 772 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 773 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 774 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 775 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 776 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, 777 }; 778 779 static const struct nla_policy 780 cfm_opt_policy[TCA_FLOWER_KEY_CFM_OPT_MAX + 1] = { 781 [TCA_FLOWER_KEY_CFM_MD_LEVEL] = NLA_POLICY_MAX(NLA_U8, 782 FLOW_DIS_CFM_MDL_MAX), 783 [TCA_FLOWER_KEY_CFM_OPCODE] = { .type = NLA_U8 }, 784 }; 785 786 static void fl_set_key_val(struct nlattr **tb, 787 void *val, int val_type, 788 void *mask, int mask_type, int len) 789 { 790 if (!tb[val_type]) 791 return; 792 nla_memcpy(val, tb[val_type], len); 793 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) 794 memset(mask, 0xff, len); 795 else 796 nla_memcpy(mask, tb[mask_type], len); 797 } 798 799 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 800 struct 
fl_flow_key *mask, 801 struct netlink_ext_ack *extack) 802 { 803 fl_set_key_val(tb, &key->tp_range.tp_min.dst, 804 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, 805 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 806 fl_set_key_val(tb, &key->tp_range.tp_max.dst, 807 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 808 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 809 fl_set_key_val(tb, &key->tp_range.tp_min.src, 810 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 811 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 812 fl_set_key_val(tb, &key->tp_range.tp_max.src, 813 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 814 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 815 816 if (mask->tp_range.tp_min.dst != mask->tp_range.tp_max.dst) { 817 NL_SET_ERR_MSG(extack, 818 "Both min and max destination ports must be specified"); 819 return -EINVAL; 820 } 821 if (mask->tp_range.tp_min.src != mask->tp_range.tp_max.src) { 822 NL_SET_ERR_MSG(extack, 823 "Both min and max source ports must be specified"); 824 return -EINVAL; 825 } 826 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 827 ntohs(key->tp_range.tp_max.dst) <= 828 ntohs(key->tp_range.tp_min.dst)) { 829 NL_SET_ERR_MSG_ATTR(extack, 830 tb[TCA_FLOWER_KEY_PORT_DST_MIN], 831 "Invalid destination port range (min must be strictly smaller than max)"); 832 return -EINVAL; 833 } 834 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 835 ntohs(key->tp_range.tp_max.src) <= 836 ntohs(key->tp_range.tp_min.src)) { 837 NL_SET_ERR_MSG_ATTR(extack, 838 tb[TCA_FLOWER_KEY_PORT_SRC_MIN], 839 "Invalid source port range (min must be strictly smaller than max)"); 840 return -EINVAL; 841 } 842 843 return 0; 844 } 845 846 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 847 struct flow_dissector_key_mpls *key_val, 848 struct flow_dissector_key_mpls *key_mask, 849 struct netlink_ext_ack *extack) 850 { 851 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 852 struct flow_dissector_mpls_lse *lse_mask; 853 struct flow_dissector_mpls_lse *lse_val; 854 u8 lse_index; 855 u8 depth; 856 int err; 857 858 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 859 mpls_stack_entry_policy, extack); 860 if (err < 0) 861 return err; 862 863 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 864 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 865 return -EINVAL; 866 } 867 868 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 869 870 /* LSE depth starts at 1, for consistency with terminology used by 871 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 
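	 *
	 * A requested depth N is therefore stored in ls[N - 1] via
	 * dissector_set_mpls_lse() below; e.g. matching the TTL of the
	 * second label stack entry uses TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH
	 * of 2 and lands in ls[1].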
872 */ 873 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 874 NL_SET_ERR_MSG_ATTR(extack, 875 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 876 "Invalid MPLS depth"); 877 return -EINVAL; 878 } 879 lse_index = depth - 1; 880 881 dissector_set_mpls_lse(key_val, lse_index); 882 dissector_set_mpls_lse(key_mask, lse_index); 883 884 lse_val = &key_val->ls[lse_index]; 885 lse_mask = &key_mask->ls[lse_index]; 886 887 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 888 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 889 lse_mask->mpls_ttl = MPLS_TTL_MASK; 890 } 891 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 892 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 893 894 if (bos & ~MPLS_BOS_MASK) { 895 NL_SET_ERR_MSG_ATTR(extack, 896 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 897 "Bottom Of Stack (BOS) must be 0 or 1"); 898 return -EINVAL; 899 } 900 lse_val->mpls_bos = bos; 901 lse_mask->mpls_bos = MPLS_BOS_MASK; 902 } 903 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 904 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 905 906 if (tc & ~MPLS_TC_MASK) { 907 NL_SET_ERR_MSG_ATTR(extack, 908 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 909 "Traffic Class (TC) must be between 0 and 7"); 910 return -EINVAL; 911 } 912 lse_val->mpls_tc = tc; 913 lse_mask->mpls_tc = MPLS_TC_MASK; 914 } 915 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 916 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 917 918 if (label & ~MPLS_LABEL_MASK) { 919 NL_SET_ERR_MSG_ATTR(extack, 920 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 921 "Label must be between 0 and 1048575"); 922 return -EINVAL; 923 } 924 lse_val->mpls_label = label; 925 lse_mask->mpls_label = MPLS_LABEL_MASK; 926 } 927 928 return 0; 929 } 930 931 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 932 struct flow_dissector_key_mpls *key_val, 933 struct flow_dissector_key_mpls *key_mask, 934 struct netlink_ext_ack *extack) 935 { 936 struct nlattr *nla_lse; 937 int rem; 938 int err; 939 940 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 941 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 942 "NLA_F_NESTED is missing"); 943 return -EINVAL; 944 } 945 946 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 947 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 948 NL_SET_ERR_MSG_ATTR(extack, nla_lse, 949 "Invalid MPLS option type"); 950 return -EINVAL; 951 } 952 953 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 954 if (err < 0) 955 return err; 956 } 957 if (rem) { 958 NL_SET_ERR_MSG(extack, 959 "Bytes leftover after parsing MPLS options"); 960 return -EINVAL; 961 } 962 963 return 0; 964 } 965 966 static int fl_set_key_mpls(struct nlattr **tb, 967 struct flow_dissector_key_mpls *key_val, 968 struct flow_dissector_key_mpls *key_mask, 969 struct netlink_ext_ack *extack) 970 { 971 struct flow_dissector_mpls_lse *lse_mask; 972 struct flow_dissector_mpls_lse *lse_val; 973 974 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 975 if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 976 tb[TCA_FLOWER_KEY_MPLS_BOS] || 977 tb[TCA_FLOWER_KEY_MPLS_TC] || 978 tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 979 NL_SET_ERR_MSG_ATTR(extack, 980 tb[TCA_FLOWER_KEY_MPLS_OPTS], 981 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); 982 return -EBADMSG; 983 } 984 985 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 986 key_val, key_mask, extack); 987 } 988 989 lse_val = &key_val->ls[0]; 990 lse_mask = &key_mask->ls[0]; 991 992 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 993 lse_val->mpls_ttl = 
nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 994 lse_mask->mpls_ttl = MPLS_TTL_MASK; 995 dissector_set_mpls_lse(key_val, 0); 996 dissector_set_mpls_lse(key_mask, 0); 997 } 998 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 999 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); 1000 1001 if (bos & ~MPLS_BOS_MASK) { 1002 NL_SET_ERR_MSG_ATTR(extack, 1003 tb[TCA_FLOWER_KEY_MPLS_BOS], 1004 "Bottom Of Stack (BOS) must be 0 or 1"); 1005 return -EINVAL; 1006 } 1007 lse_val->mpls_bos = bos; 1008 lse_mask->mpls_bos = MPLS_BOS_MASK; 1009 dissector_set_mpls_lse(key_val, 0); 1010 dissector_set_mpls_lse(key_mask, 0); 1011 } 1012 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 1013 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); 1014 1015 if (tc & ~MPLS_TC_MASK) { 1016 NL_SET_ERR_MSG_ATTR(extack, 1017 tb[TCA_FLOWER_KEY_MPLS_TC], 1018 "Traffic Class (TC) must be between 0 and 7"); 1019 return -EINVAL; 1020 } 1021 lse_val->mpls_tc = tc; 1022 lse_mask->mpls_tc = MPLS_TC_MASK; 1023 dissector_set_mpls_lse(key_val, 0); 1024 dissector_set_mpls_lse(key_mask, 0); 1025 } 1026 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 1027 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); 1028 1029 if (label & ~MPLS_LABEL_MASK) { 1030 NL_SET_ERR_MSG_ATTR(extack, 1031 tb[TCA_FLOWER_KEY_MPLS_LABEL], 1032 "Label must be between 0 and 1048575"); 1033 return -EINVAL; 1034 } 1035 lse_val->mpls_label = label; 1036 lse_mask->mpls_label = MPLS_LABEL_MASK; 1037 dissector_set_mpls_lse(key_val, 0); 1038 dissector_set_mpls_lse(key_mask, 0); 1039 } 1040 return 0; 1041 } 1042 1043 static void fl_set_key_vlan(struct nlattr **tb, 1044 __be16 ethertype, 1045 int vlan_id_key, int vlan_prio_key, 1046 int vlan_next_eth_type_key, 1047 struct flow_dissector_key_vlan *key_val, 1048 struct flow_dissector_key_vlan *key_mask) 1049 { 1050 #define VLAN_PRIORITY_MASK 0x7 1051 1052 if (tb[vlan_id_key]) { 1053 key_val->vlan_id = 1054 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; 1055 key_mask->vlan_id = VLAN_VID_MASK; 1056 } 1057 if (tb[vlan_prio_key]) { 1058 key_val->vlan_priority = 1059 nla_get_u8(tb[vlan_prio_key]) & 1060 VLAN_PRIORITY_MASK; 1061 key_mask->vlan_priority = VLAN_PRIORITY_MASK; 1062 } 1063 if (ethertype) { 1064 key_val->vlan_tpid = ethertype; 1065 key_mask->vlan_tpid = cpu_to_be16(~0); 1066 } 1067 if (tb[vlan_next_eth_type_key]) { 1068 key_val->vlan_eth_type = 1069 nla_get_be16(tb[vlan_next_eth_type_key]); 1070 key_mask->vlan_eth_type = cpu_to_be16(~0); 1071 } 1072 } 1073 1074 static void fl_set_key_pppoe(struct nlattr **tb, 1075 struct flow_dissector_key_pppoe *key_val, 1076 struct flow_dissector_key_pppoe *key_mask, 1077 struct fl_flow_key *key, 1078 struct fl_flow_key *mask) 1079 { 1080 /* key_val::type must be set to ETH_P_PPP_SES 1081 * because ETH_P_PPP_SES was stored in basic.n_proto 1082 * which might get overwritten by ppp_proto 1083 * or might be set to 0, the role of key_val::type 1084 * is similar to vlan_key::tpid 1085 */ 1086 key_val->type = htons(ETH_P_PPP_SES); 1087 key_mask->type = cpu_to_be16(~0); 1088 1089 if (tb[TCA_FLOWER_KEY_PPPOE_SID]) { 1090 key_val->session_id = 1091 nla_get_be16(tb[TCA_FLOWER_KEY_PPPOE_SID]); 1092 key_mask->session_id = cpu_to_be16(~0); 1093 } 1094 if (tb[TCA_FLOWER_KEY_PPP_PROTO]) { 1095 key_val->ppp_proto = 1096 nla_get_be16(tb[TCA_FLOWER_KEY_PPP_PROTO]); 1097 key_mask->ppp_proto = cpu_to_be16(~0); 1098 1099 if (key_val->ppp_proto == htons(PPP_IP)) { 1100 key->basic.n_proto = htons(ETH_P_IP); 1101 mask->basic.n_proto = cpu_to_be16(~0); 1102 } else if (key_val->ppp_proto == htons(PPP_IPV6)) { 1103 key->basic.n_proto = 
					htons(ETH_P_IPV6);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_UC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_UC);
			mask->basic.n_proto = cpu_to_be16(~0);
		} else if (key_val->ppp_proto == htons(PPP_MPLS_MC)) {
			key->basic.n_proto = htons(ETH_P_MPLS_MC);
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	} else {
		key->basic.n_proto = 0;
		mask->basic.n_proto = cpu_to_be16(0);
	}
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key,
			    u32 *flags_mask, struct netlink_ext_ack *extack)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) {
		NL_SET_ERR_MSG(extack, "Missing flags mask");
		return -EINVAL;
	}

	key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
		return -ERANGE;

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match.
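	 * Here "depth" is the number of bytes remaining in the nested key
	 * or mask attribute; it is zero only when this helper runs for the
	 * mask and no TCA_FLOWER_KEY_ENC_OPTS_MASK was supplied, in which
	 * case the 0xff fill above is kept as an all-ones (exact) mask.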
*/ 1191 if (!depth) 1192 return sizeof(struct geneve_opt) + data_len; 1193 1194 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { 1195 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); 1196 return -EINVAL; 1197 } 1198 1199 err = nla_parse_nested_deprecated(tb, 1200 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, 1201 nla, geneve_opt_policy, extack); 1202 if (err < 0) 1203 return err; 1204 1205 /* We are not allowed to omit any of CLASS, TYPE or DATA 1206 * fields from the key. 1207 */ 1208 if (!option_len && 1209 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || 1210 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || 1211 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { 1212 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); 1213 return -EINVAL; 1214 } 1215 1216 /* Omitting any of CLASS, TYPE or DATA fields is allowed 1217 * for the mask. 1218 */ 1219 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { 1220 int new_len = key->enc_opts.len; 1221 1222 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; 1223 data_len = nla_len(data); 1224 if (data_len < 4) { 1225 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); 1226 return -ERANGE; 1227 } 1228 if (data_len % 4) { 1229 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); 1230 return -ERANGE; 1231 } 1232 1233 new_len += sizeof(struct geneve_opt) + data_len; 1234 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); 1235 if (new_len > FLOW_DIS_TUN_OPTS_MAX) { 1236 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); 1237 return -ERANGE; 1238 } 1239 opt->length = data_len / 4; 1240 memcpy(opt->opt_data, nla_data(data), data_len); 1241 } 1242 1243 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { 1244 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; 1245 opt->opt_class = nla_get_be16(class); 1246 } 1247 1248 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { 1249 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; 1250 opt->type = nla_get_u8(type); 1251 } 1252 1253 return sizeof(struct geneve_opt) + data_len; 1254 } 1255 1256 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1257 int depth, int option_len, 1258 struct netlink_ext_ack *extack) 1259 { 1260 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; 1261 struct vxlan_metadata *md; 1262 int err; 1263 1264 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1265 memset(md, 0xff, sizeof(*md)); 1266 1267 if (!depth) 1268 return sizeof(*md); 1269 1270 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { 1271 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); 1272 return -EINVAL; 1273 } 1274 1275 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, 1276 vxlan_opt_policy, extack); 1277 if (err < 0) 1278 return err; 1279 1280 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1281 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); 1282 return -EINVAL; 1283 } 1284 1285 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1286 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); 1287 md->gbp &= VXLAN_GBP_MASK; 1288 } 1289 1290 return sizeof(*md); 1291 } 1292 1293 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1294 int depth, int option_len, 1295 struct netlink_ext_ack *extack) 1296 { 1297 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; 1298 struct erspan_metadata *md; 1299 int err; 1300 1301 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1302 memset(md, 0xff, sizeof(*md)); 
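	/* Start from an all-ones pattern; the version field is forced to the
	 * valid value 1 just below and may be overridden by
	 * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER. When this helper runs for an
	 * absent mask (depth == 0), the all-ones pattern is returned as an
	 * exact match.
	 */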
1303 md->version = 1; 1304 1305 if (!depth) 1306 return sizeof(*md); 1307 1308 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { 1309 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); 1310 return -EINVAL; 1311 } 1312 1313 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, 1314 erspan_opt_policy, extack); 1315 if (err < 0) 1316 return err; 1317 1318 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { 1319 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); 1320 return -EINVAL; 1321 } 1322 1323 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) 1324 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); 1325 1326 if (md->version == 1) { 1327 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1328 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); 1329 return -EINVAL; 1330 } 1331 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1332 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; 1333 memset(&md->u, 0x00, sizeof(md->u)); 1334 md->u.index = nla_get_be32(nla); 1335 } 1336 } else if (md->version == 2) { 1337 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || 1338 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { 1339 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); 1340 return -EINVAL; 1341 } 1342 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { 1343 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; 1344 md->u.md2.dir = nla_get_u8(nla); 1345 } 1346 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { 1347 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; 1348 set_hwid(&md->u.md2, nla_get_u8(nla)); 1349 } 1350 } else { 1351 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); 1352 return -EINVAL; 1353 } 1354 1355 return sizeof(*md); 1356 } 1357 1358 static int fl_set_gtp_opt(const struct nlattr *nla, struct fl_flow_key *key, 1359 int depth, int option_len, 1360 struct netlink_ext_ack *extack) 1361 { 1362 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GTP_MAX + 1]; 1363 struct gtp_pdu_session_info *sinfo; 1364 u8 len = key->enc_opts.len; 1365 int err; 1366 1367 sinfo = (struct gtp_pdu_session_info *)&key->enc_opts.data[len]; 1368 memset(sinfo, 0xff, option_len); 1369 1370 if (!depth) 1371 return sizeof(*sinfo); 1372 1373 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GTP) { 1374 NL_SET_ERR_MSG_MOD(extack, "Non-gtp option type for mask"); 1375 return -EINVAL; 1376 } 1377 1378 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, nla, 1379 gtp_opt_policy, extack); 1380 if (err < 0) 1381 return err; 1382 1383 if (!option_len && 1384 (!tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE] || 1385 !tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI])) { 1386 NL_SET_ERR_MSG_MOD(extack, 1387 "Missing tunnel key gtp option pdu type or qfi"); 1388 return -EINVAL; 1389 } 1390 1391 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]) 1392 sinfo->pdu_type = 1393 nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE]); 1394 1395 if (tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]) 1396 sinfo->qfi = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_GTP_QFI]); 1397 1398 return sizeof(*sinfo); 1399 } 1400 1401 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1402 struct fl_flow_key *mask, 1403 struct netlink_ext_ack *extack) 1404 { 1405 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 1406 int err, option_len, key_depth, msk_depth = 0; 1407 1408 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], 1409 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1410 enc_opts_policy, extack); 1411 if (err) 1412 return err; 1413 1414 nla_enc_key = 
nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 1415 1416 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 1417 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], 1418 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1419 enc_opts_policy, extack); 1420 if (err) 1421 return err; 1422 1423 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1424 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1425 if (!nla_ok(nla_opt_msk, msk_depth)) { 1426 NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks"); 1427 return -EINVAL; 1428 } 1429 } 1430 1431 nla_for_each_attr(nla_opt_key, nla_enc_key, 1432 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { 1433 switch (nla_type(nla_opt_key)) { 1434 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1435 if (key->enc_opts.dst_opt_type && 1436 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { 1437 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1438 return -EINVAL; 1439 } 1440 option_len = 0; 1441 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1442 option_len = fl_set_geneve_opt(nla_opt_key, key, 1443 key_depth, option_len, 1444 extack); 1445 if (option_len < 0) 1446 return option_len; 1447 1448 key->enc_opts.len += option_len; 1449 /* At the same time we need to parse through the mask 1450 * in order to verify exact and mask attribute lengths. 1451 */ 1452 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1453 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1454 msk_depth, option_len, 1455 extack); 1456 if (option_len < 0) 1457 return option_len; 1458 1459 mask->enc_opts.len += option_len; 1460 if (key->enc_opts.len != mask->enc_opts.len) { 1461 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1462 return -EINVAL; 1463 } 1464 break; 1465 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: 1466 if (key->enc_opts.dst_opt_type) { 1467 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 1468 return -EINVAL; 1469 } 1470 option_len = 0; 1471 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1472 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1473 key_depth, option_len, 1474 extack); 1475 if (option_len < 0) 1476 return option_len; 1477 1478 key->enc_opts.len += option_len; 1479 /* At the same time we need to parse through the mask 1480 * in order to verify exact and mask attribute lengths. 1481 */ 1482 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1483 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1484 msk_depth, option_len, 1485 extack); 1486 if (option_len < 0) 1487 return option_len; 1488 1489 mask->enc_opts.len += option_len; 1490 if (key->enc_opts.len != mask->enc_opts.len) { 1491 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1492 return -EINVAL; 1493 } 1494 break; 1495 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: 1496 if (key->enc_opts.dst_opt_type) { 1497 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); 1498 return -EINVAL; 1499 } 1500 option_len = 0; 1501 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1502 option_len = fl_set_erspan_opt(nla_opt_key, key, 1503 key_depth, option_len, 1504 extack); 1505 if (option_len < 0) 1506 return option_len; 1507 1508 key->enc_opts.len += option_len; 1509 /* At the same time we need to parse through the mask 1510 * in order to verify exact and mask attribute lengths. 
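			 * (nla_opt_msk may be NULL when no mask attribute was
			 * given; fl_set_erspan_opt() then takes the
			 * msk_depth == 0 path and treats the option as an
			 * exact match, and the length check below keeps the
			 * key and mask option lists the same size.)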
1511 */ 1512 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1513 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1514 msk_depth, option_len, 1515 extack); 1516 if (option_len < 0) 1517 return option_len; 1518 1519 mask->enc_opts.len += option_len; 1520 if (key->enc_opts.len != mask->enc_opts.len) { 1521 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1522 return -EINVAL; 1523 } 1524 break; 1525 case TCA_FLOWER_KEY_ENC_OPTS_GTP: 1526 if (key->enc_opts.dst_opt_type) { 1527 NL_SET_ERR_MSG_MOD(extack, 1528 "Duplicate type for gtp options"); 1529 return -EINVAL; 1530 } 1531 option_len = 0; 1532 key->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; 1533 option_len = fl_set_gtp_opt(nla_opt_key, key, 1534 key_depth, option_len, 1535 extack); 1536 if (option_len < 0) 1537 return option_len; 1538 1539 key->enc_opts.len += option_len; 1540 /* At the same time we need to parse through the mask 1541 * in order to verify exact and mask attribute lengths. 1542 */ 1543 mask->enc_opts.dst_opt_type = TUNNEL_GTP_OPT; 1544 option_len = fl_set_gtp_opt(nla_opt_msk, mask, 1545 msk_depth, option_len, 1546 extack); 1547 if (option_len < 0) 1548 return option_len; 1549 1550 mask->enc_opts.len += option_len; 1551 if (key->enc_opts.len != mask->enc_opts.len) { 1552 NL_SET_ERR_MSG_MOD(extack, 1553 "Key and mask miss aligned"); 1554 return -EINVAL; 1555 } 1556 break; 1557 default: 1558 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1559 return -EINVAL; 1560 } 1561 1562 if (!msk_depth) 1563 continue; 1564 1565 if (!nla_ok(nla_opt_msk, msk_depth)) { 1566 NL_SET_ERR_MSG(extack, "A mask attribute is invalid"); 1567 return -EINVAL; 1568 } 1569 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1570 } 1571 1572 return 0; 1573 } 1574 1575 static int fl_validate_ct_state(u16 state, struct nlattr *tb, 1576 struct netlink_ext_ack *extack) 1577 { 1578 if (state && !(state & TCA_FLOWER_KEY_CT_FLAGS_TRACKED)) { 1579 NL_SET_ERR_MSG_ATTR(extack, tb, 1580 "no trk, so no other flag can be set"); 1581 return -EINVAL; 1582 } 1583 1584 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1585 state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED) { 1586 NL_SET_ERR_MSG_ATTR(extack, tb, 1587 "new and est are mutually exclusive"); 1588 return -EINVAL; 1589 } 1590 1591 if (state & TCA_FLOWER_KEY_CT_FLAGS_INVALID && 1592 state & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 1593 TCA_FLOWER_KEY_CT_FLAGS_INVALID)) { 1594 NL_SET_ERR_MSG_ATTR(extack, tb, 1595 "when inv is set, only trk may be set"); 1596 return -EINVAL; 1597 } 1598 1599 if (state & TCA_FLOWER_KEY_CT_FLAGS_NEW && 1600 state & TCA_FLOWER_KEY_CT_FLAGS_REPLY) { 1601 NL_SET_ERR_MSG_ATTR(extack, tb, 1602 "new and rpl are mutually exclusive"); 1603 return -EINVAL; 1604 } 1605 1606 return 0; 1607 } 1608 1609 static int fl_set_key_ct(struct nlattr **tb, 1610 struct flow_dissector_key_ct *key, 1611 struct flow_dissector_key_ct *mask, 1612 struct netlink_ext_ack *extack) 1613 { 1614 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1615 int err; 1616 1617 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1618 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1619 return -EOPNOTSUPP; 1620 } 1621 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1622 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1623 sizeof(key->ct_state)); 1624 1625 err = fl_validate_ct_state(key->ct_state & mask->ct_state, 1626 tb[TCA_FLOWER_KEY_CT_STATE_MASK], 1627 extack); 1628 if (err) 1629 return err; 1630 1631 } 1632 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1633 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1634 NL_SET_ERR_MSG(extack, "Conntrack zones isn't 
enabled"); 1635 return -EOPNOTSUPP; 1636 } 1637 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1638 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1639 sizeof(key->ct_zone)); 1640 } 1641 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1642 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1643 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1644 return -EOPNOTSUPP; 1645 } 1646 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1647 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1648 sizeof(key->ct_mark)); 1649 } 1650 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1651 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1652 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1653 return -EOPNOTSUPP; 1654 } 1655 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1656 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1657 sizeof(key->ct_labels)); 1658 } 1659 1660 return 0; 1661 } 1662 1663 static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype, 1664 struct fl_flow_key *key, struct fl_flow_key *mask, 1665 int vthresh) 1666 { 1667 const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh; 1668 1669 if (!tb) { 1670 *ethertype = 0; 1671 return good_num_of_vlans; 1672 } 1673 1674 *ethertype = nla_get_be16(tb); 1675 if (good_num_of_vlans || eth_type_vlan(*ethertype)) 1676 return true; 1677 1678 key->basic.n_proto = *ethertype; 1679 mask->basic.n_proto = cpu_to_be16(~0); 1680 return false; 1681 } 1682 1683 static void fl_set_key_cfm_md_level(struct nlattr **tb, 1684 struct fl_flow_key *key, 1685 struct fl_flow_key *mask, 1686 struct netlink_ext_ack *extack) 1687 { 1688 u8 level; 1689 1690 if (!tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]) 1691 return; 1692 1693 level = nla_get_u8(tb[TCA_FLOWER_KEY_CFM_MD_LEVEL]); 1694 key->cfm.mdl_ver = FIELD_PREP(FLOW_DIS_CFM_MDL_MASK, level); 1695 mask->cfm.mdl_ver = FLOW_DIS_CFM_MDL_MASK; 1696 } 1697 1698 static void fl_set_key_cfm_opcode(struct nlattr **tb, 1699 struct fl_flow_key *key, 1700 struct fl_flow_key *mask, 1701 struct netlink_ext_ack *extack) 1702 { 1703 fl_set_key_val(tb, &key->cfm.opcode, TCA_FLOWER_KEY_CFM_OPCODE, 1704 &mask->cfm.opcode, TCA_FLOWER_UNSPEC, 1705 sizeof(key->cfm.opcode)); 1706 } 1707 1708 static int fl_set_key_cfm(struct nlattr **tb, 1709 struct fl_flow_key *key, 1710 struct fl_flow_key *mask, 1711 struct netlink_ext_ack *extack) 1712 { 1713 struct nlattr *nla_cfm_opt[TCA_FLOWER_KEY_CFM_OPT_MAX + 1]; 1714 int err; 1715 1716 if (!tb[TCA_FLOWER_KEY_CFM]) 1717 return 0; 1718 1719 err = nla_parse_nested(nla_cfm_opt, TCA_FLOWER_KEY_CFM_OPT_MAX, 1720 tb[TCA_FLOWER_KEY_CFM], cfm_opt_policy, extack); 1721 if (err < 0) 1722 return err; 1723 1724 fl_set_key_cfm_opcode(nla_cfm_opt, key, mask, extack); 1725 fl_set_key_cfm_md_level(nla_cfm_opt, key, mask, extack); 1726 1727 return 0; 1728 } 1729 1730 static int fl_set_key(struct net *net, struct nlattr **tb, 1731 struct fl_flow_key *key, struct fl_flow_key *mask, 1732 struct netlink_ext_ack *extack) 1733 { 1734 __be16 ethertype; 1735 int ret = 0; 1736 1737 if (tb[TCA_FLOWER_INDEV]) { 1738 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); 1739 if (err < 0) 1740 return err; 1741 key->meta.ingress_ifindex = err; 1742 mask->meta.ingress_ifindex = 0xffffffff; 1743 } 1744 1745 fl_set_key_val(tb, &key->meta.l2_miss, TCA_FLOWER_L2_MISS, 1746 &mask->meta.l2_miss, TCA_FLOWER_UNSPEC, 1747 sizeof(key->meta.l2_miss)); 1748 1749 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 1750 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 1751 sizeof(key->eth.dst)); 1752 fl_set_key_val(tb, 
key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 1753 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 1754 sizeof(key->eth.src)); 1755 fl_set_key_val(tb, &key->num_of_vlans, 1756 TCA_FLOWER_KEY_NUM_OF_VLANS, 1757 &mask->num_of_vlans, 1758 TCA_FLOWER_UNSPEC, 1759 sizeof(key->num_of_vlans)); 1760 1761 if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) { 1762 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, 1763 TCA_FLOWER_KEY_VLAN_PRIO, 1764 TCA_FLOWER_KEY_VLAN_ETH_TYPE, 1765 &key->vlan, &mask->vlan); 1766 1767 if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE], 1768 &ethertype, key, mask, 1)) { 1769 fl_set_key_vlan(tb, ethertype, 1770 TCA_FLOWER_KEY_CVLAN_ID, 1771 TCA_FLOWER_KEY_CVLAN_PRIO, 1772 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1773 &key->cvlan, &mask->cvlan); 1774 fl_set_key_val(tb, &key->basic.n_proto, 1775 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1776 &mask->basic.n_proto, 1777 TCA_FLOWER_UNSPEC, 1778 sizeof(key->basic.n_proto)); 1779 } 1780 } 1781 1782 if (key->basic.n_proto == htons(ETH_P_PPP_SES)) 1783 fl_set_key_pppoe(tb, &key->pppoe, &mask->pppoe, key, mask); 1784 1785 if (key->basic.n_proto == htons(ETH_P_IP) || 1786 key->basic.n_proto == htons(ETH_P_IPV6)) { 1787 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1788 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1789 sizeof(key->basic.ip_proto)); 1790 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1791 } 1792 1793 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1794 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1795 mask->control.addr_type = ~0; 1796 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1797 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1798 sizeof(key->ipv4.src)); 1799 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1800 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1801 sizeof(key->ipv4.dst)); 1802 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1803 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1804 mask->control.addr_type = ~0; 1805 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1806 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1807 sizeof(key->ipv6.src)); 1808 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1809 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1810 sizeof(key->ipv6.dst)); 1811 } 1812 1813 if (key->basic.ip_proto == IPPROTO_TCP) { 1814 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1815 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1816 sizeof(key->tp.src)); 1817 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1818 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1819 sizeof(key->tp.dst)); 1820 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1821 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1822 sizeof(key->tcp.flags)); 1823 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1824 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1825 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1826 sizeof(key->tp.src)); 1827 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1828 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1829 sizeof(key->tp.dst)); 1830 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1831 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1832 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1833 sizeof(key->tp.src)); 1834 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1835 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1836 sizeof(key->tp.dst)); 1837 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1838 key->basic.ip_proto
== IPPROTO_ICMP) { 1839 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1840 &mask->icmp.type, 1841 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1842 sizeof(key->icmp.type)); 1843 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1844 &mask->icmp.code, 1845 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1846 sizeof(key->icmp.code)); 1847 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1848 key->basic.ip_proto == IPPROTO_ICMPV6) { 1849 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1850 &mask->icmp.type, 1851 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1852 sizeof(key->icmp.type)); 1853 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1854 &mask->icmp.code, 1855 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1856 sizeof(key->icmp.code)); 1857 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1858 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1859 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); 1860 if (ret) 1861 return ret; 1862 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1863 key->basic.n_proto == htons(ETH_P_RARP)) { 1864 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 1865 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 1866 sizeof(key->arp.sip)); 1867 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 1868 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 1869 sizeof(key->arp.tip)); 1870 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 1871 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 1872 sizeof(key->arp.op)); 1873 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 1874 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 1875 sizeof(key->arp.sha)); 1876 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 1877 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 1878 sizeof(key->arp.tha)); 1879 } else if (key->basic.ip_proto == IPPROTO_L2TP) { 1880 fl_set_key_val(tb, &key->l2tpv3.session_id, 1881 TCA_FLOWER_KEY_L2TPV3_SID, 1882 &mask->l2tpv3.session_id, TCA_FLOWER_UNSPEC, 1883 sizeof(key->l2tpv3.session_id)); 1884 } else if (key->basic.n_proto == htons(ETH_P_CFM)) { 1885 ret = fl_set_key_cfm(tb, key, mask, extack); 1886 if (ret) 1887 return ret; 1888 } 1889 1890 if (key->basic.ip_proto == IPPROTO_TCP || 1891 key->basic.ip_proto == IPPROTO_UDP || 1892 key->basic.ip_proto == IPPROTO_SCTP) { 1893 ret = fl_set_key_port_range(tb, key, mask, extack); 1894 if (ret) 1895 return ret; 1896 } 1897 1898 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 1899 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 1900 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1901 mask->enc_control.addr_type = ~0; 1902 fl_set_key_val(tb, &key->enc_ipv4.src, 1903 TCA_FLOWER_KEY_ENC_IPV4_SRC, 1904 &mask->enc_ipv4.src, 1905 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 1906 sizeof(key->enc_ipv4.src)); 1907 fl_set_key_val(tb, &key->enc_ipv4.dst, 1908 TCA_FLOWER_KEY_ENC_IPV4_DST, 1909 &mask->enc_ipv4.dst, 1910 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 1911 sizeof(key->enc_ipv4.dst)); 1912 } 1913 1914 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 1915 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 1916 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1917 mask->enc_control.addr_type = ~0; 1918 fl_set_key_val(tb, &key->enc_ipv6.src, 1919 TCA_FLOWER_KEY_ENC_IPV6_SRC, 1920 &mask->enc_ipv6.src, 1921 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 1922 sizeof(key->enc_ipv6.src)); 1923 fl_set_key_val(tb, &key->enc_ipv6.dst, 1924 TCA_FLOWER_KEY_ENC_IPV6_DST, 1925 &mask->enc_ipv6.dst, 1926 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 1927 sizeof(key->enc_ipv6.dst)); 1928 } 1929 1930 fl_set_key_val(tb, &key->enc_key_id.keyid, 
TCA_FLOWER_KEY_ENC_KEY_ID, 1931 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 1932 sizeof(key->enc_key_id.keyid)); 1933 1934 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 1935 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 1936 sizeof(key->enc_tp.src)); 1937 1938 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 1939 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 1940 sizeof(key->enc_tp.dst)); 1941 1942 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 1943 1944 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 1945 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 1946 sizeof(key->hash.hash)); 1947 1948 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 1949 ret = fl_set_enc_opt(tb, key, mask, extack); 1950 if (ret) 1951 return ret; 1952 } 1953 1954 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); 1955 if (ret) 1956 return ret; 1957 1958 if (tb[TCA_FLOWER_KEY_FLAGS]) 1959 ret = fl_set_key_flags(tb, &key->control.flags, 1960 &mask->control.flags, extack); 1961 1962 return ret; 1963 } 1964 1965 static void fl_mask_copy(struct fl_flow_mask *dst, 1966 struct fl_flow_mask *src) 1967 { 1968 const void *psrc = fl_key_get_start(&src->key, src); 1969 void *pdst = fl_key_get_start(&dst->key, src); 1970 1971 memcpy(pdst, psrc, fl_mask_range(src)); 1972 dst->range = src->range; 1973 } 1974 1975 static const struct rhashtable_params fl_ht_params = { 1976 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 1977 .head_offset = offsetof(struct cls_fl_filter, ht_node), 1978 .automatic_shrinking = true, 1979 }; 1980 1981 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 1982 { 1983 mask->filter_ht_params = fl_ht_params; 1984 mask->filter_ht_params.key_len = fl_mask_range(mask); 1985 mask->filter_ht_params.key_offset += mask->range.start; 1986 1987 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 1988 } 1989 1990 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 1991 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 1992 1993 #define FL_KEY_IS_MASKED(mask, member) \ 1994 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 1995 0, FL_KEY_MEMBER_SIZE(member)) 1996 1997 #define FL_KEY_SET(keys, cnt, id, member) \ 1998 do { \ 1999 keys[cnt].key_id = id; \ 2000 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 2001 cnt++; \ 2002 } while (0) 2003 2004 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 2005 do { \ 2006 if (FL_KEY_IS_MASKED(mask, member)) \ 2007 FL_KEY_SET(keys, cnt, id, member); \ 2008 } while (0) 2009 2010 static void fl_init_dissector(struct flow_dissector *dissector, 2011 struct fl_flow_key *mask) 2012 { 2013 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 2014 size_t cnt = 0; 2015 2016 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2017 FLOW_DISSECTOR_KEY_META, meta); 2018 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 2019 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 2020 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2021 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 2022 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2023 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 2024 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2025 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 2026 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2027 FLOW_DISSECTOR_KEY_PORTS, tp); 2028 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2029 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 2030 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2031 FLOW_DISSECTOR_KEY_IP, ip); 2032 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2033
FLOW_DISSECTOR_KEY_TCP, tcp); 2034 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2035 FLOW_DISSECTOR_KEY_ICMP, icmp); 2036 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2037 FLOW_DISSECTOR_KEY_ARP, arp); 2038 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2039 FLOW_DISSECTOR_KEY_MPLS, mpls); 2040 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2041 FLOW_DISSECTOR_KEY_VLAN, vlan); 2042 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2043 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 2044 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2045 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 2046 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2047 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 2048 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2049 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 2050 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 2051 FL_KEY_IS_MASKED(mask, enc_ipv6)) 2052 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, 2053 enc_control); 2054 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2055 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 2056 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2057 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 2058 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2059 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 2060 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2061 FLOW_DISSECTOR_KEY_CT, ct); 2062 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2063 FLOW_DISSECTOR_KEY_HASH, hash); 2064 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2065 FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans); 2066 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2067 FLOW_DISSECTOR_KEY_PPPOE, pppoe); 2068 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2069 FLOW_DISSECTOR_KEY_L2TPV3, l2tpv3); 2070 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 2071 FLOW_DISSECTOR_KEY_CFM, cfm); 2072 2073 skb_flow_dissector_init(dissector, keys, cnt); 2074 } 2075 2076 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 2077 struct fl_flow_mask *mask) 2078 { 2079 struct fl_flow_mask *newmask; 2080 int err; 2081 2082 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); 2083 if (!newmask) 2084 return ERR_PTR(-ENOMEM); 2085 2086 fl_mask_copy(newmask, mask); 2087 2088 if ((newmask->key.tp_range.tp_min.dst && 2089 newmask->key.tp_range.tp_max.dst) || 2090 (newmask->key.tp_range.tp_min.src && 2091 newmask->key.tp_range.tp_max.src)) 2092 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 2093 2094 err = fl_init_mask_hashtable(newmask); 2095 if (err) 2096 goto errout_free; 2097 2098 fl_init_dissector(&newmask->dissector, &newmask->key); 2099 2100 INIT_LIST_HEAD_RCU(&newmask->filters); 2101 2102 refcount_set(&newmask->refcnt, 1); 2103 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 2104 &newmask->ht_node, mask_ht_params); 2105 if (err) 2106 goto errout_destroy; 2107 2108 spin_lock(&head->masks_lock); 2109 list_add_tail_rcu(&newmask->list, &head->masks); 2110 spin_unlock(&head->masks_lock); 2111 2112 return newmask; 2113 2114 errout_destroy: 2115 rhashtable_destroy(&newmask->ht); 2116 errout_free: 2117 kfree(newmask); 2118 2119 return ERR_PTR(err); 2120 } 2121 2122 static int fl_check_assign_mask(struct cls_fl_head *head, 2123 struct cls_fl_filter *fnew, 2124 struct cls_fl_filter *fold, 2125 struct fl_flow_mask *mask) 2126 { 2127 struct fl_flow_mask *newmask; 2128 int ret = 0; 2129 2130 rcu_read_lock(); 2131 2132 /* Insert mask as temporary node to prevent concurrent creation of mask 2133 * with same key. Any concurrent lookups with same key will return 2134 * -EAGAIN because mask's refcnt is zero. 
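 * The lookup-and-insert below thus has three interesting outcomes:
 * the node was inserted (go on to create and publish a fully
 * initialized mask), an existing mask was found (take a reference
 * to it), or the existing mask is concurrently being deleted with
 * its refcnt already at zero, in which case -EAGAIN asks the
 * caller to retry.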
2135 */ 2136 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 2137 &mask->ht_node, 2138 mask_ht_params); 2139 if (!fnew->mask) { 2140 rcu_read_unlock(); 2141 2142 if (fold) { 2143 ret = -EINVAL; 2144 goto errout_cleanup; 2145 } 2146 2147 newmask = fl_create_new_mask(head, mask); 2148 if (IS_ERR(newmask)) { 2149 ret = PTR_ERR(newmask); 2150 goto errout_cleanup; 2151 } 2152 2153 fnew->mask = newmask; 2154 return 0; 2155 } else if (IS_ERR(fnew->mask)) { 2156 ret = PTR_ERR(fnew->mask); 2157 } else if (fold && fold->mask != fnew->mask) { 2158 ret = -EINVAL; 2159 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 2160 /* Mask was deleted concurrently, try again */ 2161 ret = -EAGAIN; 2162 } 2163 rcu_read_unlock(); 2164 return ret; 2165 2166 errout_cleanup: 2167 rhashtable_remove_fast(&head->ht, &mask->ht_node, 2168 mask_ht_params); 2169 return ret; 2170 } 2171 2172 static bool fl_needs_tc_skb_ext(const struct fl_flow_key *mask) 2173 { 2174 return mask->meta.l2_miss; 2175 } 2176 2177 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 2178 struct cls_fl_filter *fold, 2179 bool *in_ht) 2180 { 2181 struct fl_flow_mask *mask = fnew->mask; 2182 int err; 2183 2184 err = rhashtable_lookup_insert_fast(&mask->ht, 2185 &fnew->ht_node, 2186 mask->filter_ht_params); 2187 if (err) { 2188 *in_ht = false; 2189 /* It is okay if filter with same key exists when 2190 * overwriting. 2191 */ 2192 return fold && err == -EEXIST ? 0 : err; 2193 } 2194 2195 *in_ht = true; 2196 return 0; 2197 } 2198 2199 static int fl_change(struct net *net, struct sk_buff *in_skb, 2200 struct tcf_proto *tp, unsigned long base, 2201 u32 handle, struct nlattr **tca, 2202 void **arg, u32 flags, 2203 struct netlink_ext_ack *extack) 2204 { 2205 struct cls_fl_head *head = fl_head_dereference(tp); 2206 bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); 2207 struct cls_fl_filter *fold = *arg; 2208 bool bound_to_filter = false; 2209 struct cls_fl_filter *fnew; 2210 struct fl_flow_mask *mask; 2211 struct nlattr **tb; 2212 bool in_ht; 2213 int err; 2214 2215 if (!tca[TCA_OPTIONS]) { 2216 err = -EINVAL; 2217 goto errout_fold; 2218 } 2219 2220 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 2221 if (!mask) { 2222 err = -ENOBUFS; 2223 goto errout_fold; 2224 } 2225 2226 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2227 if (!tb) { 2228 err = -ENOBUFS; 2229 goto errout_mask_alloc; 2230 } 2231 2232 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2233 tca[TCA_OPTIONS], fl_policy, NULL); 2234 if (err < 0) 2235 goto errout_tb; 2236 2237 if (fold && handle && fold->handle != handle) { 2238 err = -EINVAL; 2239 goto errout_tb; 2240 } 2241 2242 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); 2243 if (!fnew) { 2244 err = -ENOBUFS; 2245 goto errout_tb; 2246 } 2247 INIT_LIST_HEAD(&fnew->hw_list); 2248 refcount_set(&fnew->refcnt, 1); 2249 2250 if (tb[TCA_FLOWER_FLAGS]) { 2251 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 2252 2253 if (!tc_flags_valid(fnew->flags)) { 2254 kfree(fnew); 2255 err = -EINVAL; 2256 goto errout_tb; 2257 } 2258 } 2259 2260 if (!fold) { 2261 spin_lock(&tp->lock); 2262 if (!handle) { 2263 handle = 1; 2264 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2265 INT_MAX, GFP_ATOMIC); 2266 } else { 2267 err = idr_alloc_u32(&head->handle_idr, NULL, &handle, 2268 handle, GFP_ATOMIC); 2269 2270 /* Filter with specified handle was concurrently 2271 * inserted after initial check in cls_api. This is not 2272 * necessarily an error if NLM_F_EXCL is not set in 2273 * message flags. 
Returning EAGAIN will cause cls_api to 2274 * try to update concurrently inserted rule. 2275 */ 2276 if (err == -ENOSPC) 2277 err = -EAGAIN; 2278 } 2279 spin_unlock(&tp->lock); 2280 2281 if (err) { 2282 kfree(fnew); 2283 goto errout_tb; 2284 } 2285 } 2286 fnew->handle = handle; 2287 2288 err = tcf_exts_init_ex(&fnew->exts, net, TCA_FLOWER_ACT, 0, tp, handle, 2289 !tc_skip_hw(fnew->flags)); 2290 if (err < 0) 2291 goto errout_idr; 2292 2293 err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE], 2294 &fnew->exts, flags, fnew->flags, 2295 extack); 2296 if (err < 0) 2297 goto errout_idr; 2298 2299 if (tb[TCA_FLOWER_CLASSID]) { 2300 fnew->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 2301 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2302 rtnl_lock(); 2303 tcf_bind_filter(tp, &fnew->res, base); 2304 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2305 rtnl_unlock(); 2306 bound_to_filter = true; 2307 } 2308 2309 err = fl_set_key(net, tb, &fnew->key, &mask->key, extack); 2310 if (err) 2311 goto unbind_filter; 2312 2313 fl_mask_update_range(mask); 2314 fl_set_masked_key(&fnew->mkey, &fnew->key, mask); 2315 2316 if (!fl_mask_fits_tmplt(tp->chain->tmplt_priv, mask)) { 2317 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 2318 err = -EINVAL; 2319 goto unbind_filter; 2320 } 2321 2322 /* Enable tc skb extension if filter matches on data extracted from 2323 * this extension. 2324 */ 2325 if (fl_needs_tc_skb_ext(&mask->key)) { 2326 fnew->needs_tc_skb_ext = 1; 2327 tc_skb_ext_tc_enable(); 2328 } 2329 2330 err = fl_check_assign_mask(head, fnew, fold, mask); 2331 if (err) 2332 goto unbind_filter; 2333 2334 err = fl_ht_insert_unique(fnew, fold, &in_ht); 2335 if (err) 2336 goto errout_mask; 2337 2338 if (!tc_skip_hw(fnew->flags)) { 2339 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 2340 if (err) 2341 goto errout_ht; 2342 } 2343 2344 if (!tc_in_hw(fnew->flags)) 2345 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 2346 2347 spin_lock(&tp->lock); 2348 2349 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2350 * proto again or create new one, if necessary. 2351 */ 2352 if (tp->deleting) { 2353 err = -EAGAIN; 2354 goto errout_hw; 2355 } 2356 2357 if (fold) { 2358 /* Fold filter was deleted concurrently. Retry lookup. */ 2359 if (fold->deleted) { 2360 err = -EAGAIN; 2361 goto errout_hw; 2362 } 2363 2364 fnew->handle = handle; 2365 2366 if (!in_ht) { 2367 struct rhashtable_params params = 2368 fnew->mask->filter_ht_params; 2369 2370 err = rhashtable_insert_fast(&fnew->mask->ht, 2371 &fnew->ht_node, 2372 params); 2373 if (err) 2374 goto errout_hw; 2375 in_ht = true; 2376 } 2377 2378 refcount_inc(&fnew->refcnt); 2379 rhashtable_remove_fast(&fold->mask->ht, 2380 &fold->ht_node, 2381 fold->mask->filter_ht_params); 2382 idr_replace(&head->handle_idr, fnew, fnew->handle); 2383 list_replace_rcu(&fold->list, &fnew->list); 2384 fold->deleted = true; 2385 2386 spin_unlock(&tp->lock); 2387 2388 fl_mask_put(head, fold->mask); 2389 if (!tc_skip_hw(fold->flags)) 2390 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2391 tcf_unbind_filter(tp, &fold->res); 2392 /* Caller holds reference to fold, so refcnt is always > 0 2393 * after this. 
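 * That is why the bare refcount_dec() below, rather than a
 * dec-and-test, is safe, and why the subsequent __fl_put() cannot
 * free fold yet: the final reference is dropped by the caller.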
2394 */ 2395 refcount_dec(&fold->refcnt); 2396 __fl_put(fold); 2397 } else { 2398 idr_replace(&head->handle_idr, fnew, fnew->handle); 2399 2400 refcount_inc(&fnew->refcnt); 2401 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2402 spin_unlock(&tp->lock); 2403 } 2404 2405 *arg = fnew; 2406 2407 kfree(tb); 2408 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2409 return 0; 2410 2411 errout_ht: 2412 spin_lock(&tp->lock); 2413 errout_hw: 2414 fnew->deleted = true; 2415 spin_unlock(&tp->lock); 2416 if (!tc_skip_hw(fnew->flags)) 2417 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2418 if (in_ht) 2419 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2420 fnew->mask->filter_ht_params); 2421 errout_mask: 2422 fl_mask_put(head, fnew->mask); 2423 2424 unbind_filter: 2425 if (bound_to_filter) { 2426 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2427 rtnl_lock(); 2428 tcf_unbind_filter(tp, &fnew->res); 2429 if (flags & TCA_ACT_FLAGS_NO_RTNL) 2430 rtnl_unlock(); 2431 } 2432 2433 errout_idr: 2434 if (!fold) 2435 idr_remove(&head->handle_idr, fnew->handle); 2436 __fl_put(fnew); 2437 errout_tb: 2438 kfree(tb); 2439 errout_mask_alloc: 2440 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2441 errout_fold: 2442 if (fold) 2443 __fl_put(fold); 2444 return err; 2445 } 2446 2447 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2448 bool rtnl_held, struct netlink_ext_ack *extack) 2449 { 2450 struct cls_fl_head *head = fl_head_dereference(tp); 2451 struct cls_fl_filter *f = arg; 2452 bool last_on_mask; 2453 int err = 0; 2454 2455 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2456 *last = list_empty(&head->masks); 2457 __fl_put(f); 2458 2459 return err; 2460 } 2461 2462 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2463 bool rtnl_held) 2464 { 2465 struct cls_fl_head *head = fl_head_dereference(tp); 2466 unsigned long id = arg->cookie, tmp; 2467 struct cls_fl_filter *f; 2468 2469 arg->count = arg->skip; 2470 2471 rcu_read_lock(); 2472 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2473 /* don't return filters that are being deleted */ 2474 if (!f || !refcount_inc_not_zero(&f->refcnt)) 2475 continue; 2476 rcu_read_unlock(); 2477 2478 if (arg->fn(tp, f, arg) < 0) { 2479 __fl_put(f); 2480 arg->stop = 1; 2481 rcu_read_lock(); 2482 break; 2483 } 2484 __fl_put(f); 2485 arg->count++; 2486 rcu_read_lock(); 2487 } 2488 rcu_read_unlock(); 2489 arg->cookie = id; 2490 } 2491 2492 static struct cls_fl_filter * 2493 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2494 { 2495 struct cls_fl_head *head = fl_head_dereference(tp); 2496 2497 spin_lock(&tp->lock); 2498 if (list_empty(&head->hw_filters)) { 2499 spin_unlock(&tp->lock); 2500 return NULL; 2501 } 2502 2503 if (!f) 2504 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2505 hw_list); 2506 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2507 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2508 spin_unlock(&tp->lock); 2509 return f; 2510 } 2511 } 2512 2513 spin_unlock(&tp->lock); 2514 return NULL; 2515 } 2516 2517 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2518 void *cb_priv, struct netlink_ext_ack *extack) 2519 { 2520 struct tcf_block *block = tp->chain->block; 2521 struct flow_cls_offload cls_flower = {}; 2522 struct cls_fl_filter *f = NULL; 2523 int err; 2524 2525 /* hw_filters list can only be changed by hw offload functions after 2526 * obtaining rtnl lock. 
Make sure it is not changed while reoffload is 2527 * iterating it. 2528 */ 2529 ASSERT_RTNL(); 2530 2531 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2532 cls_flower.rule = 2533 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2534 if (!cls_flower.rule) { 2535 __fl_put(f); 2536 return -ENOMEM; 2537 } 2538 2539 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2540 extack); 2541 cls_flower.command = add ? 2542 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2543 cls_flower.cookie = (unsigned long)f; 2544 cls_flower.rule->match.dissector = &f->mask->dissector; 2545 cls_flower.rule->match.mask = &f->mask->key; 2546 cls_flower.rule->match.key = &f->mkey; 2547 2548 err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts, 2549 cls_flower.common.extack); 2550 if (err) { 2551 kfree(cls_flower.rule); 2552 if (tc_skip_sw(f->flags)) { 2553 __fl_put(f); 2554 return err; 2555 } 2556 goto next_flow; 2557 } 2558 2559 cls_flower.classid = f->res.classid; 2560 2561 err = tc_setup_cb_reoffload(block, tp, add, cb, 2562 TC_SETUP_CLSFLOWER, &cls_flower, 2563 cb_priv, &f->flags, 2564 &f->in_hw_count); 2565 tc_cleanup_offload_action(&cls_flower.rule->action); 2566 kfree(cls_flower.rule); 2567 2568 if (err) { 2569 __fl_put(f); 2570 return err; 2571 } 2572 next_flow: 2573 __fl_put(f); 2574 } 2575 2576 return 0; 2577 } 2578 2579 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2580 { 2581 struct flow_cls_offload *cls_flower = type_data; 2582 struct cls_fl_filter *f = 2583 (struct cls_fl_filter *) cls_flower->cookie; 2584 struct cls_fl_head *head = fl_head_dereference(tp); 2585 2586 spin_lock(&tp->lock); 2587 list_add(&f->hw_list, &head->hw_filters); 2588 spin_unlock(&tp->lock); 2589 } 2590 2591 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2592 { 2593 struct flow_cls_offload *cls_flower = type_data; 2594 struct cls_fl_filter *f = 2595 (struct cls_fl_filter *) cls_flower->cookie; 2596 2597 spin_lock(&tp->lock); 2598 if (!list_empty(&f->hw_list)) 2599 list_del_init(&f->hw_list); 2600 spin_unlock(&tp->lock); 2601 } 2602 2603 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2604 struct fl_flow_tmplt *tmplt) 2605 { 2606 struct flow_cls_offload cls_flower = {}; 2607 struct tcf_block *block = chain->block; 2608 2609 cls_flower.rule = flow_rule_alloc(0); 2610 if (!cls_flower.rule) 2611 return -ENOMEM; 2612 2613 cls_flower.common.chain_index = chain->index; 2614 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2615 cls_flower.cookie = (unsigned long) tmplt; 2616 cls_flower.rule->match.dissector = &tmplt->dissector; 2617 cls_flower.rule->match.mask = &tmplt->mask; 2618 cls_flower.rule->match.key = &tmplt->dummy_key; 2619 2620 /* We don't care if driver (any of them) fails to handle this 2621 * call. It serves just as a hint for it. 
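 * That is also why the return value of tc_setup_cb_call() is
 * ignored and 0 is returned unconditionally below.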
2622 */ 2623 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2624 kfree(cls_flower.rule); 2625 2626 return 0; 2627 } 2628 2629 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2630 struct fl_flow_tmplt *tmplt) 2631 { 2632 struct flow_cls_offload cls_flower = {}; 2633 struct tcf_block *block = chain->block; 2634 2635 cls_flower.common.chain_index = chain->index; 2636 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2637 cls_flower.cookie = (unsigned long) tmplt; 2638 2639 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2640 } 2641 2642 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2643 struct nlattr **tca, 2644 struct netlink_ext_ack *extack) 2645 { 2646 struct fl_flow_tmplt *tmplt; 2647 struct nlattr **tb; 2648 int err; 2649 2650 if (!tca[TCA_OPTIONS]) 2651 return ERR_PTR(-EINVAL); 2652 2653 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2654 if (!tb) 2655 return ERR_PTR(-ENOBUFS); 2656 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2657 tca[TCA_OPTIONS], fl_policy, NULL); 2658 if (err) 2659 goto errout_tb; 2660 2661 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); 2662 if (!tmplt) { 2663 err = -ENOMEM; 2664 goto errout_tb; 2665 } 2666 tmplt->chain = chain; 2667 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); 2668 if (err) 2669 goto errout_tmplt; 2670 2671 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2672 2673 err = fl_hw_create_tmplt(chain, tmplt); 2674 if (err) 2675 goto errout_tmplt; 2676 2677 kfree(tb); 2678 return tmplt; 2679 2680 errout_tmplt: 2681 kfree(tmplt); 2682 errout_tb: 2683 kfree(tb); 2684 return ERR_PTR(err); 2685 } 2686 2687 static void fl_tmplt_destroy(void *tmplt_priv) 2688 { 2689 struct fl_flow_tmplt *tmplt = tmplt_priv; 2690 2691 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2692 kfree(tmplt); 2693 } 2694 2695 static int fl_dump_key_val(struct sk_buff *skb, 2696 void *val, int val_type, 2697 void *mask, int mask_type, int len) 2698 { 2699 int err; 2700 2701 if (!memchr_inv(mask, 0, len)) 2702 return 0; 2703 err = nla_put(skb, val_type, len, val); 2704 if (err) 2705 return err; 2706 if (mask_type != TCA_FLOWER_UNSPEC) { 2707 err = nla_put(skb, mask_type, len, mask); 2708 if (err) 2709 return err; 2710 } 2711 return 0; 2712 } 2713 2714 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2715 struct fl_flow_key *mask) 2716 { 2717 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2718 TCA_FLOWER_KEY_PORT_DST_MIN, 2719 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2720 sizeof(key->tp_range.tp_min.dst)) || 2721 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2722 TCA_FLOWER_KEY_PORT_DST_MAX, 2723 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2724 sizeof(key->tp_range.tp_max.dst)) || 2725 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2726 TCA_FLOWER_KEY_PORT_SRC_MIN, 2727 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2728 sizeof(key->tp_range.tp_min.src)) || 2729 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2730 TCA_FLOWER_KEY_PORT_SRC_MAX, 2731 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2732 sizeof(key->tp_range.tp_max.src))) 2733 return -1; 2734 2735 return 0; 2736 } 2737 2738 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2739 struct flow_dissector_key_mpls *mpls_key, 2740 struct flow_dissector_key_mpls *mpls_mask, 2741 u8 lse_index) 2742 { 2743 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2744 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2745 int err; 
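/* The LSE depth attribute is 1-based while lse_index counts from 0;
 * each field below is dumped only when its mask is set.
 */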
2746 2747 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2748 lse_index + 1); 2749 if (err) 2750 return err; 2751 2752 if (lse_mask->mpls_ttl) { 2753 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2754 lse_key->mpls_ttl); 2755 if (err) 2756 return err; 2757 } 2758 if (lse_mask->mpls_bos) { 2759 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2760 lse_key->mpls_bos); 2761 if (err) 2762 return err; 2763 } 2764 if (lse_mask->mpls_tc) { 2765 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2766 lse_key->mpls_tc); 2767 if (err) 2768 return err; 2769 } 2770 if (lse_mask->mpls_label) { 2771 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2772 lse_key->mpls_label); 2773 if (err) 2774 return err; 2775 } 2776 2777 return 0; 2778 } 2779 2780 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2781 struct flow_dissector_key_mpls *mpls_key, 2782 struct flow_dissector_key_mpls *mpls_mask) 2783 { 2784 struct nlattr *opts; 2785 struct nlattr *lse; 2786 u8 lse_index; 2787 int err; 2788 2789 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2790 if (!opts) 2791 return -EMSGSIZE; 2792 2793 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2794 if (!(mpls_mask->used_lses & 1 << lse_index)) 2795 continue; 2796 2797 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2798 if (!lse) { 2799 err = -EMSGSIZE; 2800 goto err_opts; 2801 } 2802 2803 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2804 lse_index); 2805 if (err) 2806 goto err_opts_lse; 2807 nla_nest_end(skb, lse); 2808 } 2809 nla_nest_end(skb, opts); 2810 2811 return 0; 2812 2813 err_opts_lse: 2814 nla_nest_cancel(skb, lse); 2815 err_opts: 2816 nla_nest_cancel(skb, opts); 2817 2818 return err; 2819 } 2820 2821 static int fl_dump_key_mpls(struct sk_buff *skb, 2822 struct flow_dissector_key_mpls *mpls_key, 2823 struct flow_dissector_key_mpls *mpls_mask) 2824 { 2825 struct flow_dissector_mpls_lse *lse_mask; 2826 struct flow_dissector_mpls_lse *lse_key; 2827 int err; 2828 2829 if (!mpls_mask->used_lses) 2830 return 0; 2831 2832 lse_mask = &mpls_mask->ls[0]; 2833 lse_key = &mpls_key->ls[0]; 2834 2835 /* For backward compatibility, don't use the MPLS nested attributes if 2836 * the rule can be expressed using the old attributes. 2837 */ 2838 if (mpls_mask->used_lses & ~1 || 2839 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 2840 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 2841 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 2842 2843 if (lse_mask->mpls_ttl) { 2844 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 2845 lse_key->mpls_ttl); 2846 if (err) 2847 return err; 2848 } 2849 if (lse_mask->mpls_tc) { 2850 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 2851 lse_key->mpls_tc); 2852 if (err) 2853 return err; 2854 } 2855 if (lse_mask->mpls_label) { 2856 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 2857 lse_key->mpls_label); 2858 if (err) 2859 return err; 2860 } 2861 if (lse_mask->mpls_bos) { 2862 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 2863 lse_key->mpls_bos); 2864 if (err) 2865 return err; 2866 } 2867 return 0; 2868 } 2869 2870 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 2871 struct flow_dissector_key_ip *key, 2872 struct flow_dissector_key_ip *mask) 2873 { 2874 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 2875 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 2876 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 2877 int ttl_mask = encap ? 
TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 2878 2879 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 2880 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 2881 return -1; 2882 2883 return 0; 2884 } 2885 2886 static int fl_dump_key_vlan(struct sk_buff *skb, 2887 int vlan_id_key, int vlan_prio_key, 2888 struct flow_dissector_key_vlan *vlan_key, 2889 struct flow_dissector_key_vlan *vlan_mask) 2890 { 2891 int err; 2892 2893 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 2894 return 0; 2895 if (vlan_mask->vlan_id) { 2896 err = nla_put_u16(skb, vlan_id_key, 2897 vlan_key->vlan_id); 2898 if (err) 2899 return err; 2900 } 2901 if (vlan_mask->vlan_priority) { 2902 err = nla_put_u8(skb, vlan_prio_key, 2903 vlan_key->vlan_priority); 2904 if (err) 2905 return err; 2906 } 2907 return 0; 2908 } 2909 2910 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 2911 u32 *flower_key, u32 *flower_mask, 2912 u32 flower_flag_bit, u32 dissector_flag_bit) 2913 { 2914 if (dissector_mask & dissector_flag_bit) { 2915 *flower_mask |= flower_flag_bit; 2916 if (dissector_key & dissector_flag_bit) 2917 *flower_key |= flower_flag_bit; 2918 } 2919 } 2920 2921 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) 2922 { 2923 u32 key, mask; 2924 __be32 _key, _mask; 2925 int err; 2926 2927 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 2928 return 0; 2929 2930 key = 0; 2931 mask = 0; 2932 2933 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2934 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 2935 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2936 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 2937 FLOW_DIS_FIRST_FRAG); 2938 2939 _key = cpu_to_be32(key); 2940 _mask = cpu_to_be32(mask); 2941 2942 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); 2943 if (err) 2944 return err; 2945 2946 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); 2947 } 2948 2949 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 2950 struct flow_dissector_key_enc_opts *enc_opts) 2951 { 2952 struct geneve_opt *opt; 2953 struct nlattr *nest; 2954 int opt_off = 0; 2955 2956 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 2957 if (!nest) 2958 goto nla_put_failure; 2959 2960 while (enc_opts->len > opt_off) { 2961 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 2962 2963 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 2964 opt->opt_class)) 2965 goto nla_put_failure; 2966 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 2967 opt->type)) 2968 goto nla_put_failure; 2969 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 2970 opt->length * 4, opt->opt_data)) 2971 goto nla_put_failure; 2972 2973 opt_off += sizeof(struct geneve_opt) + opt->length * 4; 2974 } 2975 nla_nest_end(skb, nest); 2976 return 0; 2977 2978 nla_put_failure: 2979 nla_nest_cancel(skb, nest); 2980 return -EMSGSIZE; 2981 } 2982 2983 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 2984 struct flow_dissector_key_enc_opts *enc_opts) 2985 { 2986 struct vxlan_metadata *md; 2987 struct nlattr *nest; 2988 2989 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 2990 if (!nest) 2991 goto nla_put_failure; 2992 2993 md = (struct vxlan_metadata *)&enc_opts->data[0]; 2994 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 2995 goto nla_put_failure; 2996 2997 nla_nest_end(skb, nest); 2998 return 0; 2999 3000 nla_put_failure: 3001 nla_nest_cancel(skb, nest); 3002 return 
-EMSGSIZE; 3003 } 3004 3005 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 3006 struct flow_dissector_key_enc_opts *enc_opts) 3007 { 3008 struct erspan_metadata *md; 3009 struct nlattr *nest; 3010 3011 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 3012 if (!nest) 3013 goto nla_put_failure; 3014 3015 md = (struct erspan_metadata *)&enc_opts->data[0]; 3016 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 3017 goto nla_put_failure; 3018 3019 if (md->version == 1 && 3020 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 3021 goto nla_put_failure; 3022 3023 if (md->version == 2 && 3024 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 3025 md->u.md2.dir) || 3026 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 3027 get_hwid(&md->u.md2)))) 3028 goto nla_put_failure; 3029 3030 nla_nest_end(skb, nest); 3031 return 0; 3032 3033 nla_put_failure: 3034 nla_nest_cancel(skb, nest); 3035 return -EMSGSIZE; 3036 } 3037 3038 static int fl_dump_key_gtp_opt(struct sk_buff *skb, 3039 struct flow_dissector_key_enc_opts *enc_opts) 3040 3041 { 3042 struct gtp_pdu_session_info *session_info; 3043 struct nlattr *nest; 3044 3045 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GTP); 3046 if (!nest) 3047 goto nla_put_failure; 3048 3049 session_info = (struct gtp_pdu_session_info *)&enc_opts->data[0]; 3050 3051 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, 3052 session_info->pdu_type)) 3053 goto nla_put_failure; 3054 3055 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, session_info->qfi)) 3056 goto nla_put_failure; 3057 3058 nla_nest_end(skb, nest); 3059 return 0; 3060 3061 nla_put_failure: 3062 nla_nest_cancel(skb, nest); 3063 return -EMSGSIZE; 3064 } 3065 3066 static int fl_dump_key_ct(struct sk_buff *skb, 3067 struct flow_dissector_key_ct *key, 3068 struct flow_dissector_key_ct *mask) 3069 { 3070 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 3071 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 3072 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 3073 sizeof(key->ct_state))) 3074 goto nla_put_failure; 3075 3076 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 3077 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 3078 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 3079 sizeof(key->ct_zone))) 3080 goto nla_put_failure; 3081 3082 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 3083 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 3084 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 3085 sizeof(key->ct_mark))) 3086 goto nla_put_failure; 3087 3088 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 3089 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 3090 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 3091 sizeof(key->ct_labels))) 3092 goto nla_put_failure; 3093 3094 return 0; 3095 3096 nla_put_failure: 3097 return -EMSGSIZE; 3098 } 3099 3100 static int fl_dump_key_cfm(struct sk_buff *skb, 3101 struct flow_dissector_key_cfm *key, 3102 struct flow_dissector_key_cfm *mask) 3103 { 3104 struct nlattr *opts; 3105 int err; 3106 u8 mdl; 3107 3108 if (!memchr_inv(mask, 0, sizeof(*mask))) 3109 return 0; 3110 3111 opts = nla_nest_start(skb, TCA_FLOWER_KEY_CFM); 3112 if (!opts) 3113 return -EMSGSIZE; 3114 3115 if (FIELD_GET(FLOW_DIS_CFM_MDL_MASK, mask->mdl_ver)) { 3116 mdl = FIELD_GET(FLOW_DIS_CFM_MDL_MASK, key->mdl_ver); 3117 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_MD_LEVEL, mdl); 3118 if (err) 3119 goto err_cfm_opts; 3120 } 3121 3122 if (mask->opcode) { 3123 err = nla_put_u8(skb, TCA_FLOWER_KEY_CFM_OPCODE, 
key->opcode); 3124 if (err) 3125 goto err_cfm_opts; 3126 } 3127 3128 nla_nest_end(skb, opts); 3129 3130 return 0; 3131 3132 err_cfm_opts: 3133 nla_nest_cancel(skb, opts); 3134 return err; 3135 } 3136 3137 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 3138 struct flow_dissector_key_enc_opts *enc_opts) 3139 { 3140 struct nlattr *nest; 3141 int err; 3142 3143 if (!enc_opts->len) 3144 return 0; 3145 3146 nest = nla_nest_start_noflag(skb, enc_opt_type); 3147 if (!nest) 3148 goto nla_put_failure; 3149 3150 switch (enc_opts->dst_opt_type) { 3151 case TUNNEL_GENEVE_OPT: 3152 err = fl_dump_key_geneve_opt(skb, enc_opts); 3153 if (err) 3154 goto nla_put_failure; 3155 break; 3156 case TUNNEL_VXLAN_OPT: 3157 err = fl_dump_key_vxlan_opt(skb, enc_opts); 3158 if (err) 3159 goto nla_put_failure; 3160 break; 3161 case TUNNEL_ERSPAN_OPT: 3162 err = fl_dump_key_erspan_opt(skb, enc_opts); 3163 if (err) 3164 goto nla_put_failure; 3165 break; 3166 case TUNNEL_GTP_OPT: 3167 err = fl_dump_key_gtp_opt(skb, enc_opts); 3168 if (err) 3169 goto nla_put_failure; 3170 break; 3171 default: 3172 goto nla_put_failure; 3173 } 3174 nla_nest_end(skb, nest); 3175 return 0; 3176 3177 nla_put_failure: 3178 nla_nest_cancel(skb, nest); 3179 return -EMSGSIZE; 3180 } 3181 3182 static int fl_dump_key_enc_opt(struct sk_buff *skb, 3183 struct flow_dissector_key_enc_opts *key_opts, 3184 struct flow_dissector_key_enc_opts *msk_opts) 3185 { 3186 int err; 3187 3188 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 3189 if (err) 3190 return err; 3191 3192 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 3193 } 3194 3195 static int fl_dump_key(struct sk_buff *skb, struct net *net, 3196 struct fl_flow_key *key, struct fl_flow_key *mask) 3197 { 3198 if (mask->meta.ingress_ifindex) { 3199 struct net_device *dev; 3200 3201 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 3202 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 3203 goto nla_put_failure; 3204 } 3205 3206 if (fl_dump_key_val(skb, &key->meta.l2_miss, 3207 TCA_FLOWER_L2_MISS, &mask->meta.l2_miss, 3208 TCA_FLOWER_UNSPEC, sizeof(key->meta.l2_miss))) 3209 goto nla_put_failure; 3210 3211 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 3212 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 3213 sizeof(key->eth.dst)) || 3214 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 3215 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 3216 sizeof(key->eth.src)) || 3217 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 3218 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 3219 sizeof(key->basic.n_proto))) 3220 goto nla_put_failure; 3221 3222 if (mask->num_of_vlans.num_of_vlans) { 3223 if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans)) 3224 goto nla_put_failure; 3225 } 3226 3227 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 3228 goto nla_put_failure; 3229 3230 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 3231 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 3232 goto nla_put_failure; 3233 3234 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 3235 TCA_FLOWER_KEY_CVLAN_PRIO, 3236 &key->cvlan, &mask->cvlan) || 3237 (mask->cvlan.vlan_tpid && 3238 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3239 key->cvlan.vlan_tpid))) 3240 goto nla_put_failure; 3241 3242 if (mask->basic.n_proto) { 3243 if (mask->cvlan.vlan_eth_type) { 3244 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 3245 key->basic.n_proto)) 3246 goto nla_put_failure; 3247 } else if 
(mask->vlan.vlan_eth_type) { 3248 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 3249 key->vlan.vlan_eth_type)) 3250 goto nla_put_failure; 3251 } 3252 } 3253 3254 if ((key->basic.n_proto == htons(ETH_P_IP) || 3255 key->basic.n_proto == htons(ETH_P_IPV6)) && 3256 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 3257 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 3258 sizeof(key->basic.ip_proto)) || 3259 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 3260 goto nla_put_failure; 3261 3262 if (mask->pppoe.session_id) { 3263 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPPOE_SID, 3264 key->pppoe.session_id)) 3265 goto nla_put_failure; 3266 } 3267 if (mask->basic.n_proto && mask->pppoe.ppp_proto) { 3268 if (nla_put_be16(skb, TCA_FLOWER_KEY_PPP_PROTO, 3269 key->pppoe.ppp_proto)) 3270 goto nla_put_failure; 3271 } 3272 3273 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3274 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 3275 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 3276 sizeof(key->ipv4.src)) || 3277 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 3278 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 3279 sizeof(key->ipv4.dst)))) 3280 goto nla_put_failure; 3281 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3282 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 3283 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 3284 sizeof(key->ipv6.src)) || 3285 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 3286 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 3287 sizeof(key->ipv6.dst)))) 3288 goto nla_put_failure; 3289 3290 if (key->basic.ip_proto == IPPROTO_TCP && 3291 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 3292 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 3293 sizeof(key->tp.src)) || 3294 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 3295 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 3296 sizeof(key->tp.dst)) || 3297 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 3298 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 3299 sizeof(key->tcp.flags)))) 3300 goto nla_put_failure; 3301 else if (key->basic.ip_proto == IPPROTO_UDP && 3302 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 3303 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 3304 sizeof(key->tp.src)) || 3305 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 3306 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 3307 sizeof(key->tp.dst)))) 3308 goto nla_put_failure; 3309 else if (key->basic.ip_proto == IPPROTO_SCTP && 3310 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 3311 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 3312 sizeof(key->tp.src)) || 3313 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 3314 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 3315 sizeof(key->tp.dst)))) 3316 goto nla_put_failure; 3317 else if (key->basic.n_proto == htons(ETH_P_IP) && 3318 key->basic.ip_proto == IPPROTO_ICMP && 3319 (fl_dump_key_val(skb, &key->icmp.type, 3320 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 3321 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 3322 sizeof(key->icmp.type)) || 3323 fl_dump_key_val(skb, &key->icmp.code, 3324 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 3325 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 3326 sizeof(key->icmp.code)))) 3327 goto nla_put_failure; 3328 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 3329 key->basic.ip_proto == IPPROTO_ICMPV6 && 3330 (fl_dump_key_val(skb, &key->icmp.type, 3331 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 3332 
TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 3333 sizeof(key->icmp.type)) || 3334 fl_dump_key_val(skb, &key->icmp.code, 3335 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 3336 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 3337 sizeof(key->icmp.code)))) 3338 goto nla_put_failure; 3339 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 3340 key->basic.n_proto == htons(ETH_P_RARP)) && 3341 (fl_dump_key_val(skb, &key->arp.sip, 3342 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 3343 TCA_FLOWER_KEY_ARP_SIP_MASK, 3344 sizeof(key->arp.sip)) || 3345 fl_dump_key_val(skb, &key->arp.tip, 3346 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 3347 TCA_FLOWER_KEY_ARP_TIP_MASK, 3348 sizeof(key->arp.tip)) || 3349 fl_dump_key_val(skb, &key->arp.op, 3350 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 3351 TCA_FLOWER_KEY_ARP_OP_MASK, 3352 sizeof(key->arp.op)) || 3353 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 3354 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 3355 sizeof(key->arp.sha)) || 3356 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 3357 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 3358 sizeof(key->arp.tha)))) 3359 goto nla_put_failure; 3360 else if (key->basic.ip_proto == IPPROTO_L2TP && 3361 fl_dump_key_val(skb, &key->l2tpv3.session_id, 3362 TCA_FLOWER_KEY_L2TPV3_SID, 3363 &mask->l2tpv3.session_id, 3364 TCA_FLOWER_UNSPEC, 3365 sizeof(key->l2tpv3.session_id))) 3366 goto nla_put_failure; 3367 3368 if ((key->basic.ip_proto == IPPROTO_TCP || 3369 key->basic.ip_proto == IPPROTO_UDP || 3370 key->basic.ip_proto == IPPROTO_SCTP) && 3371 fl_dump_key_port_range(skb, key, mask)) 3372 goto nla_put_failure; 3373 3374 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 3375 (fl_dump_key_val(skb, &key->enc_ipv4.src, 3376 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 3377 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 3378 sizeof(key->enc_ipv4.src)) || 3379 fl_dump_key_val(skb, &key->enc_ipv4.dst, 3380 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 3381 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 3382 sizeof(key->enc_ipv4.dst)))) 3383 goto nla_put_failure; 3384 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 3385 (fl_dump_key_val(skb, &key->enc_ipv6.src, 3386 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 3387 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 3388 sizeof(key->enc_ipv6.src)) || 3389 fl_dump_key_val(skb, &key->enc_ipv6.dst, 3390 TCA_FLOWER_KEY_ENC_IPV6_DST, 3391 &mask->enc_ipv6.dst, 3392 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 3393 sizeof(key->enc_ipv6.dst)))) 3394 goto nla_put_failure; 3395 3396 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 3397 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 3398 sizeof(key->enc_key_id)) || 3399 fl_dump_key_val(skb, &key->enc_tp.src, 3400 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 3401 &mask->enc_tp.src, 3402 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 3403 sizeof(key->enc_tp.src)) || 3404 fl_dump_key_val(skb, &key->enc_tp.dst, 3405 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 3406 &mask->enc_tp.dst, 3407 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 3408 sizeof(key->enc_tp.dst)) || 3409 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 3410 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 3411 goto nla_put_failure; 3412 3413 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 3414 goto nla_put_failure; 3415 3416 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) 3417 goto nla_put_failure; 3418 3419 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 3420 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 3421 sizeof(key->hash.hash))) 3422 goto nla_put_failure; 3423 3424 
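/* CFM is dumped last; fl_dump_key_cfm() emits nothing when the whole
 * CFM mask is zero, mirroring the other optional keys above.
 */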
if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) 3425 goto nla_put_failure; 3426 3427 return 0; 3428 3429 nla_put_failure: 3430 return -EMSGSIZE; 3431 } 3432 3433 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 3434 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3435 { 3436 struct cls_fl_filter *f = fh; 3437 struct nlattr *nest; 3438 struct fl_flow_key *key, *mask; 3439 bool skip_hw; 3440 3441 if (!f) 3442 return skb->len; 3443 3444 t->tcm_handle = f->handle; 3445 3446 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3447 if (!nest) 3448 goto nla_put_failure; 3449 3450 spin_lock(&tp->lock); 3451 3452 if (f->res.classid && 3453 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3454 goto nla_put_failure_locked; 3455 3456 key = &f->key; 3457 mask = &f->mask->key; 3458 skip_hw = tc_skip_hw(f->flags); 3459 3460 if (fl_dump_key(skb, net, key, mask)) 3461 goto nla_put_failure_locked; 3462 3463 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3464 goto nla_put_failure_locked; 3465 3466 spin_unlock(&tp->lock); 3467 3468 if (!skip_hw) 3469 fl_hw_update_stats(tp, f, rtnl_held); 3470 3471 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3472 goto nla_put_failure; 3473 3474 if (tcf_exts_dump(skb, &f->exts)) 3475 goto nla_put_failure; 3476 3477 nla_nest_end(skb, nest); 3478 3479 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3480 goto nla_put_failure; 3481 3482 return skb->len; 3483 3484 nla_put_failure_locked: 3485 spin_unlock(&tp->lock); 3486 nla_put_failure: 3487 nla_nest_cancel(skb, nest); 3488 return -1; 3489 } 3490 3491 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3492 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3493 { 3494 struct cls_fl_filter *f = fh; 3495 struct nlattr *nest; 3496 bool skip_hw; 3497 3498 if (!f) 3499 return skb->len; 3500 3501 t->tcm_handle = f->handle; 3502 3503 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3504 if (!nest) 3505 goto nla_put_failure; 3506 3507 spin_lock(&tp->lock); 3508 3509 skip_hw = tc_skip_hw(f->flags); 3510 3511 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3512 goto nla_put_failure_locked; 3513 3514 spin_unlock(&tp->lock); 3515 3516 if (!skip_hw) 3517 fl_hw_update_stats(tp, f, rtnl_held); 3518 3519 if (tcf_exts_terse_dump(skb, &f->exts)) 3520 goto nla_put_failure; 3521 3522 nla_nest_end(skb, nest); 3523 3524 return skb->len; 3525 3526 nla_put_failure_locked: 3527 spin_unlock(&tp->lock); 3528 nla_put_failure: 3529 nla_nest_cancel(skb, nest); 3530 return -1; 3531 } 3532 3533 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3534 { 3535 struct fl_flow_tmplt *tmplt = tmplt_priv; 3536 struct fl_flow_key *key, *mask; 3537 struct nlattr *nest; 3538 3539 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3540 if (!nest) 3541 goto nla_put_failure; 3542 3543 key = &tmplt->dummy_key; 3544 mask = &tmplt->mask; 3545 3546 if (fl_dump_key(skb, net, key, mask)) 3547 goto nla_put_failure; 3548 3549 nla_nest_end(skb, nest); 3550 3551 return skb->len; 3552 3553 nla_put_failure: 3554 nla_nest_cancel(skb, nest); 3555 return -EMSGSIZE; 3556 } 3557 3558 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3559 unsigned long base) 3560 { 3561 struct cls_fl_filter *f = fh; 3562 3563 tc_cls_bind_class(classid, cl, q, &f->res, base); 3564 } 3565 3566 static bool fl_delete_empty(struct tcf_proto *tp) 3567 { 3568 struct cls_fl_head *head = fl_head_dereference(tp); 3569 3570 spin_lock(&tp->lock); 3571 tp->deleting = 
idr_is_empty(&head->handle_idr); 3572 spin_unlock(&tp->lock); 3573 3574 return tp->deleting; 3575 } 3576 3577 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3578 .kind = "flower", 3579 .classify = fl_classify, 3580 .init = fl_init, 3581 .destroy = fl_destroy, 3582 .get = fl_get, 3583 .put = fl_put, 3584 .change = fl_change, 3585 .delete = fl_delete, 3586 .delete_empty = fl_delete_empty, 3587 .walk = fl_walk, 3588 .reoffload = fl_reoffload, 3589 .hw_add = fl_hw_add, 3590 .hw_del = fl_hw_del, 3591 .dump = fl_dump, 3592 .terse_dump = fl_terse_dump, 3593 .bind_class = fl_bind_class, 3594 .tmplt_create = fl_tmplt_create, 3595 .tmplt_destroy = fl_tmplt_destroy, 3596 .tmplt_dump = fl_tmplt_dump, 3597 .get_exts = fl_get_exts, 3598 .owner = THIS_MODULE, 3599 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3600 }; 3601 3602 static int __init cls_fl_init(void) 3603 { 3604 return register_tcf_proto_ops(&cls_fl_ops); 3605 } 3606 3607 static void __exit cls_fl_exit(void) 3608 { 3609 unregister_tcf_proto_ops(&cls_fl_ops); 3610 } 3611 3612 module_init(cls_fl_init); 3613 module_exit(cls_fl_exit); 3614 3615 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3616 MODULE_DESCRIPTION("Flower classifier"); 3617 MODULE_LICENSE("GPL v2"); 3618
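/*
 * Illustrative userspace usage (not part of this file; exact iproute2
 * syntax varies by version, see tc-flower(8)):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto tcp dst_ip 192.0.2.1 dst_port 80 action drop
 *
 * The key/mask pair for such a rule is parsed by fl_set_key() above and
 * the filter is then matched per-packet against the masked key table.
 */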