// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flower.c		Flower classifier
 *
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>

#include <linux/if_ether.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/mpls.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/flow_dissector.h>
#include <net/geneve.h>

#include <net/dst.h>
#include <net/dst_metadata.h>

#include <uapi/linux/netfilter/nf_conntrack_common.h>

struct fl_flow_key {
	struct flow_dissector_key_meta meta;
	struct flow_dissector_key_control control;
	struct flow_dissector_key_control enc_control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_eth_addrs eth;
	struct flow_dissector_key_vlan vlan;
	struct flow_dissector_key_vlan cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	};
	struct flow_dissector_key_ports tp;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_arp arp;
	struct flow_dissector_key_keyid enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs enc_ipv4;
		struct flow_dissector_key_ipv6_addrs enc_ipv6;
	};
	struct flow_dissector_key_ports enc_tp;
	struct flow_dissector_key_mpls mpls;
	struct flow_dissector_key_tcp tcp;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_ip enc_ip;
	struct flow_dissector_key_enc_opts enc_opts;
	struct flow_dissector_key_ports tp_min;
	struct flow_dissector_key_ports tp_max;
	struct flow_dissector_key_ct ct;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct fl_flow_mask_range {
	unsigned short int start;
	unsigned short int end;
};

struct fl_flow_mask {
	struct fl_flow_key key;
	struct fl_flow_mask_range range;
	u32 flags;
	struct rhash_head ht_node;
	struct rhashtable ht;
	struct rhashtable_params filter_ht_params;
	struct flow_dissector dissector;
	struct list_head filters;
	struct rcu_work rwork;
	struct list_head list;
	refcount_t refcnt;
};

struct fl_flow_tmplt {
	struct fl_flow_key dummy_key;
	struct fl_flow_key mask;
	struct flow_dissector dissector;
	struct tcf_chain *chain;
};

struct cls_fl_head {
	struct rhashtable ht;
	spinlock_t masks_lock; /* Protect masks list */
	struct list_head masks;
	struct list_head hw_filters;
	struct rcu_work rwork;
	struct idr handle_idr;
};

struct cls_fl_filter {
	struct fl_flow_mask *mask;
	struct rhash_head ht_node;
	struct fl_flow_key mkey;
	struct tcf_exts exts;
	struct tcf_result res;
	struct fl_flow_key key;
	struct list_head list;
	struct list_head hw_list;
	u32 handle;
	u32 flags;
	u32 in_hw_count;
	struct rcu_work rwork;
	struct net_device *hw_dev;
	/* Flower classifier is unlocked, which means that its reference counter
	 * can be changed concurrently without any kind of external
	 * synchronization. Use atomic reference counter to be concurrency-safe.
	 */
	refcount_t refcnt;
	bool deleted;
};

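/* Hashtable of all masks used by this classifier instance, keyed by the full
 * mask key, so that filters sharing an identical mask also share a single
 * fl_flow_mask (and its per-mask filter hashtable).
 */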
static const struct rhashtable_params mask_ht_params = {
	.key_offset = offsetof(struct fl_flow_mask, key),
	.key_len = sizeof(struct fl_flow_key),
	.head_offset = offsetof(struct fl_flow_mask, ht_node),
	.automatic_shrinking = true,
};

static unsigned short int fl_mask_range(const struct fl_flow_mask *mask)
{
	return mask->range.end - mask->range.start;
}

static void fl_mask_update_range(struct fl_flow_mask *mask)
{
	const u8 *bytes = (const u8 *) &mask->key;
	size_t size = sizeof(mask->key);
	size_t i, first = 0, last;

	for (i = 0; i < size; i++) {
		if (bytes[i]) {
			first = i;
			break;
		}
	}
	last = first;
	for (i = size - 1; i != first; i--) {
		if (bytes[i]) {
			last = i;
			break;
		}
	}
	mask->range.start = rounddown(first, sizeof(long));
	mask->range.end = roundup(last + 1, sizeof(long));
}

static void *fl_key_get_start(struct fl_flow_key *key,
			      const struct fl_flow_mask *mask)
{
	return (u8 *) key + mask->range.start;
}

static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key,
			      struct fl_flow_mask *mask)
{
	const long *lkey = fl_key_get_start(key, mask);
	const long *lmask = fl_key_get_start(&mask->key, mask);
	long *lmkey = fl_key_get_start(mkey, mask);
	int i;

	for (i = 0; i < fl_mask_range(mask); i += sizeof(long))
		*lmkey++ = *lkey++ & *lmask++;
}

static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt,
			       struct fl_flow_mask *mask)
{
	const long *lmask = fl_key_get_start(&mask->key, mask);
	const long *ltmplt;
	int i;

	if (!tmplt)
		return true;
	ltmplt = fl_key_get_start(&tmplt->mask, mask);
	for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) {
		if (~*ltmplt++ & *lmask++)
			return false;
	}
	return true;
}

static void fl_clear_masked_range(struct fl_flow_key *key,
				  struct fl_flow_mask *mask)
{
	memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}

static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.dst);
	max_mask = htons(filter->mask->key.tp_max.dst);
	min_val = htons(filter->key.tp_min.dst);
	max_val = htons(filter->key.tp_max.dst);

	if (min_mask && max_mask) {
		if (htons(key->tp.dst) < min_val ||
		    htons(key->tp.dst) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.dst = filter->mkey.tp_min.dst;
		mkey->tp_max.dst = filter->mkey.tp_max.dst;
	}
	return true;
}

static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
				  struct fl_flow_key *key,
				  struct fl_flow_key *mkey)
{
	__be16 min_mask, max_mask, min_val, max_val;

	min_mask = htons(filter->mask->key.tp_min.src);
	max_mask = htons(filter->mask->key.tp_max.src);
	min_val = htons(filter->key.tp_min.src);
	max_val = htons(filter->key.tp_max.src);

	if (min_mask && max_mask) {
		if (htons(key->tp.src) < min_val ||
		    htons(key->tp.src) > max_val)
			return false;

		/* skb does not have min and max values */
		mkey->tp_min.src = filter->mkey.tp_min.src;
		mkey->tp_max.src = filter->mkey.tp_max.src;
	}
	return true;
}

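/* Lookup helpers: filters are found in the per-mask hashtable using the
 * masked key. For masks that include port ranges, fl_lookup_range() first
 * checks the min/max bounds of each filter and only then does the hash
 * lookup on the adjusted masked key.
 */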
static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
					 struct fl_flow_key *mkey)
{
	return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
				      mask->filter_ht_params);
}

static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
					     struct fl_flow_key *mkey,
					     struct fl_flow_key *key)
{
	struct cls_fl_filter *filter, *f;

	list_for_each_entry_rcu(filter, &mask->filters, list) {
		if (!fl_range_port_dst_cmp(filter, key, mkey))
			continue;

		if (!fl_range_port_src_cmp(filter, key, mkey))
			continue;

		f = __fl_lookup(mask, mkey);
		if (f)
			return f;
	}
	return NULL;
}

static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
				       struct fl_flow_key *mkey,
				       struct fl_flow_key *key)
{
	if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
		return fl_lookup_range(mask, mkey, key);

	return __fl_lookup(mask, mkey);
}

static u16 fl_ct_info_to_flower_map[] = {
	[IP_CT_ESTABLISHED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_ESTABLISHED_REPLY] =	TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED,
	[IP_CT_RELATED_REPLY] =		TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_RELATED,
	[IP_CT_NEW] =			TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
					TCA_FLOWER_KEY_CT_FLAGS_NEW,
};

static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct cls_fl_head *head = rcu_dereference_bh(tp->root);
	struct fl_flow_key skb_mkey;
	struct fl_flow_key skb_key;
	struct fl_flow_mask *mask;
	struct cls_fl_filter *f;

	list_for_each_entry_rcu(mask, &head->masks, list) {
		fl_clear_masked_range(&skb_key, mask);

		skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
		/* skb_flow_dissect() does not set n_proto in case of an
		 * unknown protocol, so set it here.
		 */
		skb_key.basic.n_proto = skb->protocol;
		skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
		skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
				    fl_ct_info_to_flower_map,
				    ARRAY_SIZE(fl_ct_info_to_flower_map));
		skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);

		fl_set_masked_key(&skb_mkey, &skb_key, mask);

		f = fl_lookup(mask, &skb_mkey, &skb_key);
		if (f && !tc_skip_sw(f->flags)) {
			*res = f->res;
			return tcf_exts_exec(skb, &f->exts, res);
		}
	}
	return -1;
}

static int fl_init(struct tcf_proto *tp)
{
	struct cls_fl_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	spin_lock_init(&head->masks_lock);
	INIT_LIST_HEAD_RCU(&head->masks);
	INIT_LIST_HEAD(&head->hw_filters);
	rcu_assign_pointer(tp->root, head);
	idr_init(&head->handle_idr);

	return rhashtable_init(&head->ht, &mask_ht_params);
}

static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done)
{
	/* temporary masks don't have their filters list and ht initialized */
	if (mask_init_done) {
		WARN_ON(!list_empty(&mask->filters));
		rhashtable_destroy(&mask->ht);
	}
	kfree(mask);
}

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, true);
}

static void fl_uninit_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	fl_mask_free(mask, false);
}

static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask)
{
	if (!refcount_dec_and_test(&mask->refcnt))
		return false;

	rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);

	spin_lock(&head->masks_lock);
	list_del_rcu(&mask->list);
	spin_unlock(&head->masks_lock);

	tcf_queue_work(&mask->rwork, fl_mask_free_work);

	return true;
}

static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp)
{
	/* Flower classifier only changes root pointer during init and destroy.
	 * Users must obtain reference to tcf_proto instance before calling its
	 * API, so tp->root pointer is protected from concurrent call to
	 * fl_destroy() by reference counting.
	 */
	return rcu_dereference_raw(tp->root);
}

static void __fl_destroy_filter(struct cls_fl_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fl_destroy_filter_work(struct work_struct *work)
{
	struct cls_fl_filter *f = container_of(to_rcu_work(work),
					       struct cls_fl_filter, rwork);

	__fl_destroy_filter(f);
}

static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
				 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_DESTROY;
	cls_flower.cookie = (unsigned long) f;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
	spin_lock(&tp->lock);
	list_del_init(&f->hw_list);
	tcf_block_offload_dec(block, &f->flags);
	spin_unlock(&tp->lock);

	if (!rtnl_held)
		rtnl_unlock();
}

static int fl_hw_replace_filter(struct tcf_proto *tp,
				struct cls_fl_filter *f, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	bool skip_sw = tc_skip_sw(f->flags);
	int err = 0;

	if (!rtnl_held)
		rtnl_lock();

	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
	if (!cls_flower.rule) {
		err = -ENOMEM;
		goto errout;
	}

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
	cls_flower.command = FLOW_CLS_REPLACE;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.rule->match.dissector = &f->mask->dissector;
	cls_flower.rule->match.mask = &f->mask->key;
	cls_flower.rule->match.key = &f->mkey;
	cls_flower.classid = f->res.classid;

	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
	if (err) {
		kfree(cls_flower.rule);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;
		goto errout;
	}

	err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
	kfree(cls_flower.rule);

	if (err < 0) {
		fl_hw_destroy_filter(tp, f, true, NULL);
		goto errout;
	} else if (err > 0) {
		f->in_hw_count = err;
		err = 0;
		spin_lock(&tp->lock);
		tcf_block_offload_inc(block, &f->flags);
		spin_unlock(&tp->lock);
	}

	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) {
		err = -EINVAL;
		goto errout;
	}

	spin_lock(&tp->lock);
	list_add(&f->hw_list, &head->hw_filters);
	spin_unlock(&tp->lock);
errout:
	if (!rtnl_held)
		rtnl_unlock();

	return err;
}

static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
			       bool rtnl_held)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};

	if (!rtnl_held)
		rtnl_lock();

	tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL);
	cls_flower.command = FLOW_CLS_STATS;
	cls_flower.cookie = (unsigned long) f;
	cls_flower.classid = f->res.classid;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);

	tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
			      cls_flower.stats.pkts,
			      cls_flower.stats.lastused);

	if (!rtnl_held)
		rtnl_unlock();
}

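/* Drop a filter reference. Freeing the last reference is deferred through
 * tcf_queue_work() so that it happens after an RCU grace period; if the
 * netns is already being dismantled the filter is destroyed immediately.
 */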
static void __fl_put(struct cls_fl_filter *f)
{
	if (!refcount_dec_and_test(&f->refcnt))
		return;

	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, fl_destroy_filter_work);
	else
		__fl_destroy_filter(f);
}

static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
{
	struct cls_fl_filter *f;

	rcu_read_lock();
	f = idr_find(&head->handle_idr, handle);
	if (f && !refcount_inc_not_zero(&f->refcnt))
		f = NULL;
	rcu_read_unlock();

	return f;
}

static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
		       bool *last, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	*last = false;

	spin_lock(&tp->lock);
	if (f->deleted) {
		spin_unlock(&tp->lock);
		return -ENOENT;
	}

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);
	list_del_rcu(&f->list);
	spin_unlock(&tp->lock);

	*last = fl_mask_put(head, f->mask);
	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
	tcf_unbind_filter(tp, &f->res);
	__fl_put(f);

	return 0;
}

static void fl_destroy_sleepable(struct work_struct *work)
{
	struct cls_fl_head *head = container_of(to_rcu_work(work),
						struct cls_fl_head,
						rwork);

	rhashtable_destroy(&head->ht);
	kfree(head);
	module_put(THIS_MODULE);
}

static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct fl_flow_mask *mask, *next_mask;
	struct cls_fl_filter *f, *next;
	bool last;

	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
		list_for_each_entry_safe(f, next, &mask->filters, list) {
			__fl_delete(tp, f, &last, rtnl_held, extack);
			if (last)
				break;
		}
	}
	idr_destroy(&head->handle_idr);

	__module_get(THIS_MODULE);
	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
}

static void fl_put(struct tcf_proto *tp, void *arg)
{
	struct cls_fl_filter *f = arg;

	__fl_put(f);
}

static void *fl_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	return __fl_get(head, handle);
}

static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
	[TCA_FLOWER_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_FLOWER_CLASSID]		= { .type = NLA_U32 },
	[TCA_FLOWER_INDEV]		= { .type = NLA_STRING,
					    .len = IFNAMSIZ },
	[TCA_FLOWER_KEY_ETH_DST]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_DST_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_SRC_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_PROTO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_SRC_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV4_DST_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_SRC_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_IPV6_DST_MASK]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_VLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_VLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_KEY_ID]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST]	= { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) },
	[TCA_FLOWER_KEY_TCP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_UDP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_SRC]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_SCTP_DST]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_FLAGS]		= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_FLAGS_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_SIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_TIP_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_ARP_OP]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_OP_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ARP_SHA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_SHA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_ARP_THA_MASK]	= { .len = ETH_ALEN },
	[TCA_FLOWER_KEY_MPLS_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_BOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_TC]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_MPLS_LABEL]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_TCP_FLAGS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_TCP_FLAGS_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_IP_TOS]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TOS_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL]		= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_IP_TTL_MASK]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ID]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CVLAN_PRIO]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_CVLAN_ETH_TYPE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_IP_TOS]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPTS]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_ENC_OPTS_MASK]	= { .type = NLA_NESTED },
	[TCA_FLOWER_KEY_CT_STATE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_STATE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_ZONE_MASK]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_CT_MARK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_MARK_MASK]	= { .type = NLA_U32 },
	[TCA_FLOWER_KEY_CT_LABELS]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
					    .len = 128 / BITS_PER_BYTE },
};

static const struct nla_policy
enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPTS_GENEVE]	= { .type = NLA_NESTED },
};

static const struct nla_policy
geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY,
						    .len = 128 },
};

static void fl_set_key_val(struct nlattr **tb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);
	if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
				 struct fl_flow_key *mask)
{
	fl_set_key_val(tb, &key->tp_min.dst,
		       TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
	fl_set_key_val(tb, &key->tp_max.dst,
		       TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
	fl_set_key_val(tb, &key->tp_min.src,
		       TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
	fl_set_key_val(tb, &key->tp_max.src,
		       TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
		       TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));

	if ((mask->tp_min.dst && mask->tp_max.dst &&
	     htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
	    (mask->tp_min.src && mask->tp_max.src &&
	     htons(key->tp_max.src) <= htons(key->tp_min.src)))
		return -EINVAL;

	return 0;
}

static int fl_set_key_mpls(struct nlattr **tb,
			   struct flow_dissector_key_mpls *key_val,
			   struct flow_dissector_key_mpls *key_mask)
{
	if (tb[TCA_FLOWER_KEY_MPLS_TTL]) {
		key_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]);
		key_mask->mpls_ttl = MPLS_TTL_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_BOS]) {
		u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]);

		if (bos & ~MPLS_BOS_MASK)
			return -EINVAL;
		key_val->mpls_bos = bos;
		key_mask->mpls_bos = MPLS_BOS_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_TC]) {
		u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]);

		if (tc & ~MPLS_TC_MASK)
			return -EINVAL;
		key_val->mpls_tc = tc;
		key_mask->mpls_tc = MPLS_TC_MASK;
	}
	if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) {
		u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]);

		if (label & ~MPLS_LABEL_MASK)
			return -EINVAL;
		key_val->mpls_label = label;
		key_mask->mpls_label = MPLS_LABEL_MASK;
	}
	return 0;
}

static void fl_set_key_vlan(struct nlattr **tb,
			    __be16 ethertype,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *key_val,
			    struct flow_dissector_key_vlan *key_mask)
{
#define VLAN_PRIORITY_MASK	0x7

	if (tb[vlan_id_key]) {
		key_val->vlan_id =
			nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK;
		key_mask->vlan_id = VLAN_VID_MASK;
	}
	if (tb[vlan_prio_key]) {
		key_val->vlan_priority =
			nla_get_u8(tb[vlan_prio_key]) &
			VLAN_PRIORITY_MASK;
		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
	}
	key_val->vlan_tpid = ethertype;
	key_mask->vlan_tpid = cpu_to_be16(~0);
}

static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
			    u32 *dissector_key, u32 *dissector_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (flower_mask & flower_flag_bit) {
		*dissector_mask |= dissector_flag_bit;
		if (flower_key & flower_flag_bit)
			*dissector_key |= dissector_flag_bit;
	}
}

static int fl_set_key_flags(struct nlattr **tb,
			    u32 *flags_key, u32 *flags_mask)
{
	u32 key, mask;

	/* mask is mandatory for flags */
	if (!tb[TCA_FLOWER_KEY_FLAGS_MASK])
		return -EINVAL;

	key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS]));
	mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK]));

	*flags_key = 0;
	*flags_mask = 0;

	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_set_key_flag(key, mask, flags_key, flags_mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	return 0;
}

static void fl_set_key_ip(struct nlattr **tb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos));
	fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl));
}

static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
			     int depth, int option_len,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1];
	struct nlattr *class = NULL, *type = NULL, *data = NULL;
	struct geneve_opt *opt;
	int err, data_len = 0;

	if (option_len > sizeof(struct geneve_opt))
		data_len = option_len - sizeof(struct geneve_opt);

	opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
	memset(opt, 0xff, option_len);
	opt->length = data_len / 4;
	opt->r1 = 0;
	opt->r2 = 0;
	opt->r3 = 0;

	/* If no mask has been provided we assume an exact match. */
	if (!depth)
		return sizeof(struct geneve_opt) + data_len;

	if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) {
		NL_SET_ERR_MSG(extack, "Non-geneve option type for mask");
		return -EINVAL;
	}

	err = nla_parse_nested_deprecated(tb,
					  TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
					  nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	/* We are not allowed to omit any of CLASS, TYPE or DATA
	 * fields from the key.
	 */
	if (!option_len &&
	    (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] ||
	     !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	/* Omitting any of CLASS, TYPE or DATA fields is allowed
	 * for the mask.
	 */
	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) {
		int new_len = key->enc_opts.len;

		data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA];
		data_len = nla_len(data);
		if (data_len < 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
			return -ERANGE;
		}
		if (data_len % 4) {
			NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
			return -ERANGE;
		}

		new_len += sizeof(struct geneve_opt) + data_len;
		BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX);
		if (new_len > FLOW_DIS_TUN_OPTS_MAX) {
			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
			return -ERANGE;
		}
		opt->length = data_len / 4;
		memcpy(opt->opt_data, nla_data(data), data_len);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) {
		class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS];
		opt->opt_class = nla_get_be16(class);
	}

	if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) {
		type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE];
		opt->type = nla_get_u8(type);
	}

	return sizeof(struct geneve_opt) + data_len;
}

static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
			  struct fl_flow_key *mask,
			  struct netlink_ext_ack *extack)
{
	const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
	int err, option_len, key_depth, msk_depth = 0;

	err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS],
					     TCA_FLOWER_KEY_ENC_OPTS_MAX,
					     enc_opts_policy, extack);
	if (err)
		return err;

	nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
		err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
						     TCA_FLOWER_KEY_ENC_OPTS_MAX,
						     enc_opts_policy, extack);
		if (err)
			return err;

		nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
		msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
	}

	nla_for_each_attr(nla_opt_key, nla_enc_key,
			  nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) {
		switch (nla_type(nla_opt_key)) {
		case TCA_FLOWER_KEY_ENC_OPTS_GENEVE:
			option_len = 0;
			key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_key, key,
						       key_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			key->enc_opts.len += option_len;
			/* At the same time we need to parse through the mask
			 * in order to verify exact and mask attribute lengths.
			 */
			mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT;
			option_len = fl_set_geneve_opt(nla_opt_msk, mask,
						       msk_depth, option_len,
						       extack);
			if (option_len < 0)
				return option_len;

			mask->enc_opts.len += option_len;
			if (key->enc_opts.len != mask->enc_opts.len) {
				NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
				return -EINVAL;
			}

			if (msk_depth)
				nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
			break;
		default:
			NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
			return -EINVAL;
		}
	}

	return 0;
}

static int fl_set_key_ct(struct nlattr **tb,
			 struct flow_dissector_key_ct *key,
			 struct flow_dissector_key_ct *mask,
			 struct netlink_ext_ack *extack)
{
	if (tb[TCA_FLOWER_KEY_CT_STATE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) {
			NL_SET_ERR_MSG(extack, "Conntrack isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			       &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			       sizeof(key->ct_state));
	}
	if (tb[TCA_FLOWER_KEY_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			       &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			       sizeof(key->ct_zone));
	}
	if (tb[TCA_FLOWER_KEY_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			       &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			       sizeof(key->ct_mark));
	}
	if (tb[TCA_FLOWER_KEY_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled");
			return -EOPNOTSUPP;
		}
		fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			       mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			       sizeof(key->ct_labels));
	}

	return 0;
}

static int fl_set_key(struct net *net, struct nlattr **tb,
		      struct fl_flow_key *key, struct fl_flow_key *mask,
		      struct netlink_ext_ack *extack)
{
	__be16 ethertype;
	int ret = 0;

	if (tb[TCA_FLOWER_INDEV]) {
		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack);
		if (err < 0)
			return err;
		key->meta.ingress_ifindex = err;
		mask->meta.ingress_ifindex = 0xffffffff;
	}

	fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
		       mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
		       sizeof(key->eth.dst));
	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
		       sizeof(key->eth.src));

	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);

		if (eth_type_vlan(ethertype)) {
			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
					TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
					&mask->vlan);

			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
				if (eth_type_vlan(ethertype)) {
					fl_set_key_vlan(tb, ethertype,
							TCA_FLOWER_KEY_CVLAN_ID,
							TCA_FLOWER_KEY_CVLAN_PRIO,
							&key->cvlan, &mask->cvlan);
					fl_set_key_val(tb, &key->basic.n_proto,
						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
						       &mask->basic.n_proto,
						       TCA_FLOWER_UNSPEC,
						       sizeof(key->basic.n_proto));
				} else {
					key->basic.n_proto = ethertype;
					mask->basic.n_proto = cpu_to_be16(~0);
				}
			}
		} else {
			key->basic.n_proto = ethertype;
			mask->basic.n_proto = cpu_to_be16(~0);
		}
	}

	if (key->basic.n_proto == htons(ETH_P_IP) ||
	    key->basic.n_proto == htons(ETH_P_IPV6)) {
		fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			       &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			       sizeof(key->basic.ip_proto));
		fl_set_key_ip(tb, false, &key->ip, &mask->ip);
	}

	if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			       &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			       sizeof(key->ipv4.src));
		fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			       &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			       sizeof(key->ipv4.dst));
	} else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->control.addr_type = ~0;
		fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
			       &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
			       sizeof(key->ipv6.src));
		fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
			       &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
			       sizeof(key->ipv6.dst));
	}

	if (key->basic.ip_proto == IPPROTO_TCP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			       sizeof(key->tp.dst));
		fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			       &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			       sizeof(key->tcp.flags));
	} else if (key->basic.ip_proto == IPPROTO_UDP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.ip_proto == IPPROTO_SCTP) {
		fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
			       &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
			       sizeof(key->tp.src));
		fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
			       &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
			       sizeof(key->tp.dst));
	} else if (key->basic.n_proto == htons(ETH_P_IP) &&
		   key->basic.ip_proto == IPPROTO_ICMP) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		   key->basic.ip_proto == IPPROTO_ICMPV6) {
		fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE,
			       &mask->icmp.type,
			       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
			       sizeof(key->icmp.type));
		fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE,
			       &mask->icmp.code,
			       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
			       sizeof(key->icmp.code));
	} else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) ||
		   key->basic.n_proto == htons(ETH_P_MPLS_MC)) {
		ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls);
		if (ret)
			return ret;
	} else if (key->basic.n_proto == htons(ETH_P_ARP) ||
		   key->basic.n_proto == htons(ETH_P_RARP)) {
		fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
			       &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
			       sizeof(key->arp.sip));
		fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
			       &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
			       sizeof(key->arp.tip));
		fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
			       &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
			       sizeof(key->arp.op));
		fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
			       mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
			       sizeof(key->arp.sha));
		fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
			       mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
			       sizeof(key->arp.tha));
	}

	if (key->basic.ip_proto == IPPROTO_TCP ||
	    key->basic.ip_proto == IPPROTO_UDP ||
	    key->basic.ip_proto == IPPROTO_SCTP) {
		ret = fl_set_key_port_range(tb, key, mask);
		if (ret)
			return ret;
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC,
			       &mask->enc_ipv4.src,
			       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			       sizeof(key->enc_ipv4.src));
		fl_set_key_val(tb, &key->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST,
			       &mask->enc_ipv4.dst,
			       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			       sizeof(key->enc_ipv4.dst));
	}

	if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] ||
	    tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) {
		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		mask->enc_control.addr_type = ~0;
		fl_set_key_val(tb, &key->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC,
			       &mask->enc_ipv6.src,
			       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
			       sizeof(key->enc_ipv6.src));
		fl_set_key_val(tb, &key->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST,
			       &mask->enc_ipv6.dst,
			       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
			       sizeof(key->enc_ipv6.dst));
	}

	fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID,
		       &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC,
		       sizeof(key->enc_key_id.keyid));

	fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
		       &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
		       sizeof(key->enc_tp.src));

	fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
		       &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
		       sizeof(key->enc_tp.dst));

	fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);

	if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
		ret = fl_set_enc_opt(tb, key, mask, extack);
		if (ret)
			return ret;
	}

	ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack);
	if (ret)
		return ret;

	if (tb[TCA_FLOWER_KEY_FLAGS])
		ret = fl_set_key_flags(tb, &key->control.flags, &mask->control.flags);

	return ret;
}

static void fl_mask_copy(struct fl_flow_mask *dst,
			 struct fl_flow_mask *src)
{
	const void *psrc = fl_key_get_start(&src->key, src);
	void *pdst = fl_key_get_start(&dst->key, src);

	memcpy(pdst, psrc, fl_mask_range(src));
	dst->range = src->range;
}

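/* Base rhashtable parameters for the per-mask filter tables. key_len and
 * key_offset are adjusted per mask in fl_init_mask_hashtable() so that only
 * the byte range actually covered by the mask is hashed and compared.
 */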
static const struct rhashtable_params fl_ht_params = {
	.key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */
	.head_offset = offsetof(struct cls_fl_filter, ht_node),
	.automatic_shrinking = true,
};

static int fl_init_mask_hashtable(struct fl_flow_mask *mask)
{
	mask->filter_ht_params = fl_ht_params;
	mask->filter_ht_params.key_len = fl_mask_range(mask);
	mask->filter_ht_params.key_offset += mask->range.start;

	return rhashtable_init(&mask->ht, &mask->filter_ht_params);
}

#define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
#define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member)

#define FL_KEY_IS_MASKED(mask, member)						\
	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),		\
		   0, FL_KEY_MEMBER_SIZE(member))				\

#define FL_KEY_SET(keys, cnt, id, member)					\
	do {									\
		keys[cnt].key_id = id;						\
		keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member);		\
		cnt++;								\
	} while(0);

#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)			\
	do {									\
		if (FL_KEY_IS_MASKED(mask, member))				\
			FL_KEY_SET(keys, cnt, id, member);			\
	} while(0);

static void fl_init_dissector(struct flow_dissector *dissector,
			      struct fl_flow_key *mask)
{
	struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX];
	size_t cnt = 0;

	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_META, meta);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
	if (FL_KEY_IS_MASKED(mask, tp) ||
	    FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_IP, ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_TCP, tcp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ICMP, icmp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ARP, arp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_MPLS, mpls);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_VLAN, vlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CVLAN, cvlan);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6);
	if (FL_KEY_IS_MASKED(mask, enc_ipv4) ||
	    FL_KEY_IS_MASKED(mask, enc_ipv6))
		FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL,
			   enc_control);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_IP, enc_ip);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
			     FLOW_DISSECTOR_KEY_CT, ct);

	skb_flow_dissector_init(dissector, keys, cnt);
}

static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
					       struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int err;

	newmask = kzalloc(sizeof(*newmask), GFP_KERNEL);
	if (!newmask)
		return ERR_PTR(-ENOMEM);

	fl_mask_copy(newmask, mask);

	if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
	    (newmask->key.tp_min.src && newmask->key.tp_max.src))
		newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;

	err = fl_init_mask_hashtable(newmask);
	if (err)
		goto errout_free;

	fl_init_dissector(&newmask->dissector, &newmask->key);

	INIT_LIST_HEAD_RCU(&newmask->filters);

	refcount_set(&newmask->refcnt, 1);
	err = rhashtable_replace_fast(&head->ht, &mask->ht_node,
				      &newmask->ht_node, mask_ht_params);
	if (err)
		goto errout_destroy;

	spin_lock(&head->masks_lock);
	list_add_tail_rcu(&newmask->list, &head->masks);
	spin_unlock(&head->masks_lock);

	return newmask;

errout_destroy:
	rhashtable_destroy(&newmask->ht);
errout_free:
	kfree(newmask);

	return ERR_PTR(err);
}

static int fl_check_assign_mask(struct cls_fl_head *head,
				struct cls_fl_filter *fnew,
				struct cls_fl_filter *fold,
				struct fl_flow_mask *mask)
{
	struct fl_flow_mask *newmask;
	int ret = 0;

	rcu_read_lock();

	/* Insert mask as temporary node to prevent concurrent creation of mask
	 * with same key. Any concurrent lookups with same key will return
	 * -EAGAIN because mask's refcnt is zero.
	 */
	fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht,
						       &mask->ht_node,
						       mask_ht_params);
	if (!fnew->mask) {
		rcu_read_unlock();

		if (fold) {
			ret = -EINVAL;
			goto errout_cleanup;
		}

		newmask = fl_create_new_mask(head, mask);
		if (IS_ERR(newmask)) {
			ret = PTR_ERR(newmask);
			goto errout_cleanup;
		}

		fnew->mask = newmask;
		return 0;
	} else if (IS_ERR(fnew->mask)) {
		ret = PTR_ERR(fnew->mask);
	} else if (fold && fold->mask != fnew->mask) {
		ret = -EINVAL;
	} else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) {
		/* Mask was deleted concurrently, try again */
		ret = -EAGAIN;
	}
	rcu_read_unlock();
	return ret;

errout_cleanup:
	rhashtable_remove_fast(&head->ht, &mask->ht_node,
			       mask_ht_params);
	return ret;
}

static int fl_set_parms(struct net *net, struct tcf_proto *tp,
			struct cls_fl_filter *f, struct fl_flow_mask *mask,
			unsigned long base, struct nlattr **tb,
			struct nlattr *est, bool ovr,
			struct fl_flow_tmplt *tmplt, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FLOWER_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]);
		if (!rtnl_held)
			rtnl_lock();
		tcf_bind_filter(tp, &f->res, base);
		if (!rtnl_held)
			rtnl_unlock();
	}

	err = fl_set_key(net, tb, &f->key, &mask->key, extack);
	if (err)
		return err;

	fl_mask_update_range(mask);
	fl_set_masked_key(&f->mkey, &f->key, mask);

	if (!fl_mask_fits_tmplt(tmplt, mask)) {
		NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template");
		return -EINVAL;
	}

	return 0;
}

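/* Insert the new filter into its mask's hashtable. A duplicate key is only
 * tolerated when an existing filter is being overwritten (fold != NULL).
 */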
static int fl_ht_insert_unique(struct cls_fl_filter *fnew,
			       struct cls_fl_filter *fold,
			       bool *in_ht)
{
	struct fl_flow_mask *mask = fnew->mask;
	int err;

	err = rhashtable_lookup_insert_fast(&mask->ht,
					    &fnew->ht_node,
					    mask->filter_ht_params);
	if (err) {
		*in_ht = false;
		/* It is okay if filter with same key exists when
		 * overwriting.
		 */
		return fold && err == -EEXIST ? 0 : err;
	}

	*in_ht = true;
	return 0;
}

static int fl_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca,
		     void **arg, bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *fold = *arg;
	struct cls_fl_filter *fnew;
	struct fl_flow_mask *mask;
	struct nlattr **tb;
	bool in_ht;
	int err;

	if (!tca[TCA_OPTIONS]) {
		err = -EINVAL;
		goto errout_fold;
	}

	mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL);
	if (!mask) {
		err = -ENOBUFS;
		goto errout_fold;
	}

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb) {
		err = -ENOBUFS;
		goto errout_mask_alloc;
	}

	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err < 0)
		goto errout_tb;

	if (fold && handle && fold->handle != handle) {
		err = -EINVAL;
		goto errout_tb;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew) {
		err = -ENOBUFS;
		goto errout_tb;
	}
	INIT_LIST_HEAD(&fnew->hw_list);
	refcount_set(&fnew->refcnt, 1);

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
	if (err < 0)
		goto errout;

	if (tb[TCA_FLOWER_FLAGS]) {
		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);

		if (!tc_flags_valid(fnew->flags)) {
			err = -EINVAL;
			goto errout;
		}
	}

	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
			   tp->chain->tmplt_priv, rtnl_held, extack);
	if (err)
		goto errout;

	err = fl_check_assign_mask(head, fnew, fold, mask);
	if (err)
		goto errout;

	err = fl_ht_insert_unique(fnew, fold, &in_ht);
	if (err)
		goto errout_mask;

	if (!tc_skip_hw(fnew->flags)) {
		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
		if (err)
			goto errout_ht;
	}

	if (!tc_in_hw(fnew->flags))
		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	spin_lock(&tp->lock);

	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
	 * proto again or create new one, if necessary.
	 */
	if (tp->deleting) {
		err = -EAGAIN;
		goto errout_hw;
	}

	if (fold) {
		/* Fold filter was deleted concurrently. Retry lookup. */
		if (fold->deleted) {
			err = -EAGAIN;
			goto errout_hw;
		}

		fnew->handle = handle;

		if (!in_ht) {
			struct rhashtable_params params =
				fnew->mask->filter_ht_params;

			err = rhashtable_insert_fast(&fnew->mask->ht,
						     &fnew->ht_node,
						     params);
			if (err)
				goto errout_hw;
			in_ht = true;
		}

		refcount_inc(&fnew->refcnt);
		rhashtable_remove_fast(&fold->mask->ht,
				       &fold->ht_node,
				       fold->mask->filter_ht_params);
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->list, &fnew->list);
		fold->deleted = true;

		spin_unlock(&tp->lock);

		fl_mask_put(head, fold->mask);
		if (!tc_skip_hw(fold->flags))
			fl_hw_destroy_filter(tp, fold, rtnl_held, NULL);
		tcf_unbind_filter(tp, &fold->res);
		/* Caller holds reference to fold, so refcnt is always > 0
		 * after this.
		 */
		refcount_dec(&fold->refcnt);
		__fl_put(fold);
	} else {
		if (handle) {
			/* user specifies a handle and it doesn't exist */
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    handle, GFP_ATOMIC);

			/* Filter with specified handle was concurrently
			 * inserted after initial check in cls_api. This is not
			 * necessarily an error if NLM_F_EXCL is not set in
			 * message flags. Returning EAGAIN will cause cls_api to
			 * try to update concurrently inserted rule.
			 */
			if (err == -ENOSPC)
				err = -EAGAIN;
		} else {
			handle = 1;
			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
					    INT_MAX, GFP_ATOMIC);
		}
		if (err)
			goto errout_hw;

		refcount_inc(&fnew->refcnt);
		fnew->handle = handle;
		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
		spin_unlock(&tp->lock);
	}

	*arg = fnew;

	kfree(tb);
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
	return 0;

errout_ht:
	spin_lock(&tp->lock);
errout_hw:
	fnew->deleted = true;
	spin_unlock(&tp->lock);
	if (!tc_skip_hw(fnew->flags))
		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
	if (in_ht)
		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
				       fnew->mask->filter_ht_params);
errout_mask:
	fl_mask_put(head, fnew->mask);
errout:
	__fl_put(fnew);
errout_tb:
	kfree(tb);
errout_mask_alloc:
	tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work);
errout_fold:
	if (fold)
		__fl_put(fold);
	return err;
}

static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f = arg;
	bool last_on_mask;
	int err = 0;

	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
	*last = list_empty(&head->masks);
	__fl_put(f);

	return err;
}

static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	unsigned long id = arg->cookie, tmp;
	struct cls_fl_filter *f;

	arg->count = arg->skip;

	idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
		/* don't return filters that are being deleted */
		if (!refcount_inc_not_zero(&f->refcnt))
			continue;
		if (arg->fn(tp, f, arg) < 0) {
			__fl_put(f);
			arg->stop = 1;
			break;
		}
		__fl_put(f);
		arg->count++;
	}
	arg->cookie = id;
}

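/* Walk the hw_filters list under tp->lock and return the next entry with an
 * elevated reference count, so fl_reoffload() can keep using it after the
 * lock is dropped.
 */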
static struct cls_fl_filter *
fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	spin_lock(&tp->lock);
	if (list_empty(&head->hw_filters)) {
		spin_unlock(&tp->lock);
		return NULL;
	}

	if (!f)
		f = list_entry(&head->hw_filters, struct cls_fl_filter,
			       hw_list);
	list_for_each_entry_continue(f, &head->hw_filters, hw_list) {
		if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {
			spin_unlock(&tp->lock);
			return f;
		}
	}

	spin_unlock(&tp->lock);
	return NULL;
}

static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			void *cb_priv, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct flow_cls_offload cls_flower = {};
	struct cls_fl_filter *f = NULL;
	int err;

	/* hw_filters list can only be changed by hw offload functions after
	 * obtaining rtnl lock. Make sure it is not changed while reoffload is
	 * iterating it.
	 */
	ASSERT_RTNL();

	while ((f = fl_get_next_hw_filter(tp, f, add))) {
		cls_flower.rule =
			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
		if (!cls_flower.rule) {
			__fl_put(f);
			return -ENOMEM;
		}

		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
					   extack);
		cls_flower.command = add ?
			FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
		cls_flower.cookie = (unsigned long)f;
		cls_flower.rule->match.dissector = &f->mask->dissector;
		cls_flower.rule->match.mask = &f->mask->key;
		cls_flower.rule->match.key = &f->mkey;

		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
		if (err) {
			kfree(cls_flower.rule);
			if (tc_skip_sw(f->flags)) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		cls_flower.classid = f->res.classid;

		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
		kfree(cls_flower.rule);

		if (err) {
			if (add && tc_skip_sw(f->flags)) {
				__fl_put(f);
				return err;
			}
			goto next_flow;
		}

		spin_lock(&tp->lock);
		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
					  add);
		spin_unlock(&tp->lock);
next_flow:
		__fl_put(f);
	}

	return 0;
}

static int fl_hw_create_tmplt(struct tcf_chain *chain,
			      struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.rule = flow_rule_alloc(0);
	if (!cls_flower.rule)
		return -ENOMEM;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_CREATE;
	cls_flower.cookie = (unsigned long) tmplt;
	cls_flower.rule->match.dissector = &tmplt->dissector;
	cls_flower.rule->match.mask = &tmplt->mask;
	cls_flower.rule->match.key = &tmplt->dummy_key;

	/* We don't care if driver (any of them) fails to handle this
	 * call. It serves just as a hint for it.
	 */
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
				struct fl_flow_tmplt *tmplt)
{
	struct flow_cls_offload cls_flower = {};
	struct tcf_block *block = chain->block;

	cls_flower.common.chain_index = chain->index;
	cls_flower.command = FLOW_CLS_TMPLT_DESTROY;
	cls_flower.cookie = (unsigned long) tmplt;

	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}

static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
			     struct nlattr **tca,
			     struct netlink_ext_ack *extack)
{
	struct fl_flow_tmplt *tmplt;
	struct nlattr **tb;
	int err;

	if (!tca[TCA_OPTIONS])
		return ERR_PTR(-EINVAL);

	tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
	if (!tb)
		return ERR_PTR(-ENOBUFS);
	err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX,
					  tca[TCA_OPTIONS], fl_policy, NULL);
	if (err)
		goto errout_tb;

	tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);
	if (!tmplt) {
		err = -ENOMEM;
		goto errout_tb;
	}
	tmplt->chain = chain;
	err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack);
	if (err)
		goto errout_tmplt;

	fl_init_dissector(&tmplt->dissector, &tmplt->mask);

	err = fl_hw_create_tmplt(chain, tmplt);
	if (err)
		goto errout_tmplt;

	kfree(tb);
	return tmplt;

errout_tmplt:
	kfree(tmplt);
errout_tb:
	kfree(tb);
	return ERR_PTR(err);
}

static void fl_tmplt_destroy(void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;

	fl_hw_destroy_tmplt(tmplt->chain, tmplt);
	kfree(tmplt);
}

static int fl_dump_key_val(struct sk_buff *skb,
			   void *val, int val_type,
			   void *mask, int mask_type, int len)
{
	int err;

	if (!memchr_inv(mask, 0, len))
		return 0;
	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;
	if (mask_type != TCA_FLOWER_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}
	return 0;
}

static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
				  struct fl_flow_key *mask)
{
	if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
			    &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.dst)) ||
	    fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
			    &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.dst)) ||
	    fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
			    &mask->tp_min.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_min.src)) ||
	    fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
			    &mask->tp_max.src, TCA_FLOWER_UNSPEC,
			    sizeof(key->tp_max.src)))
		return -1;

	return 0;
}

static int fl_dump_key_mpls(struct sk_buff *skb,
			    struct flow_dissector_key_mpls *mpls_key,
			    struct flow_dissector_key_mpls *mpls_mask)
{
	int err;

	if (!memchr_inv(mpls_mask, 0, sizeof(*mpls_mask)))
		return 0;
	if (mpls_mask->mpls_ttl) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL,
				 mpls_key->mpls_ttl);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_tc) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC,
				 mpls_key->mpls_tc);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_label) {
		err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL,
				  mpls_key->mpls_label);
		if (err)
			return err;
	}
	if (mpls_mask->mpls_bos) {
		err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS,
				 mpls_key->mpls_bos);
		if (err)
			return err;
	}
	return 0;
}

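/* Dump the IP TOS and TTL key/mask pair, selecting the tunnel (encap) or
 * inner header netlink attributes depending on @encap.
 */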
static int fl_dump_key_ip(struct sk_buff *skb, bool encap,
			  struct flow_dissector_key_ip *key,
			  struct flow_dissector_key_ip *mask)
{
	int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS;
	int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL;
	int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK;
	int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK;

	if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) ||
	    fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)))
		return -1;

	return 0;
}

static int fl_dump_key_vlan(struct sk_buff *skb,
			    int vlan_id_key, int vlan_prio_key,
			    struct flow_dissector_key_vlan *vlan_key,
			    struct flow_dissector_key_vlan *vlan_mask)
{
	int err;

	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
		return 0;
	if (vlan_mask->vlan_id) {
		err = nla_put_u16(skb, vlan_id_key,
				  vlan_key->vlan_id);
		if (err)
			return err;
	}
	if (vlan_mask->vlan_priority) {
		err = nla_put_u8(skb, vlan_prio_key,
				 vlan_key->vlan_priority);
		if (err)
			return err;
	}
	return 0;
}

static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask,
			    u32 *flower_key, u32 *flower_mask,
			    u32 flower_flag_bit, u32 dissector_flag_bit)
{
	if (dissector_mask & dissector_flag_bit) {
		*flower_mask |= flower_flag_bit;
		if (dissector_key & dissector_flag_bit)
			*flower_key |= flower_flag_bit;
	}
}

static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask)
{
	u32 key, mask;
	__be32 _key, _mask;
	int err;

	if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask)))
		return 0;

	key = 0;
	mask = 0;

	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT);
	fl_get_key_flag(flags_key, flags_mask, &key, &mask,
			TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST,
			FLOW_DIS_FIRST_FRAG);

	_key = cpu_to_be32(key);
	_mask = cpu_to_be32(mask);

	err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key);
	if (err)
		return err;

	return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask);
}

static int fl_dump_key_geneve_opt(struct sk_buff *skb,
				  struct flow_dissector_key_enc_opts *enc_opts)
{
	struct geneve_opt *opt;
	struct nlattr *nest;
	int opt_off = 0;

	nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE);
	if (!nest)
		goto nla_put_failure;

	while (enc_opts->len > opt_off) {
		opt = (struct geneve_opt *)&enc_opts->data[opt_off];

		if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,
				 opt->opt_class))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,
			       opt->type))
			goto nla_put_failure;
		if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,
			    opt->length * 4, opt->opt_data))
			goto nla_put_failure;

		opt_off += sizeof(struct geneve_opt) + opt->length * 4;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

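/* Dump conntrack match keys. Each key is emitted only when the corresponding
 * conntrack feature is compiled in; otherwise it is silently skipped.
 */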
static int fl_dump_key_ct(struct sk_buff *skb,
			  struct flow_dissector_key_ct *key,
			  struct flow_dissector_key_ct *mask)
{
	if (IS_ENABLED(CONFIG_NF_CONNTRACK) &&
	    fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE,
			    &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
			    sizeof(key->ct_state)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE,
			    &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK,
			    sizeof(key->ct_zone)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK,
			    &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK,
			    sizeof(key->ct_mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS,
			    &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK,
			    sizeof(key->ct_labels)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type,
			       struct flow_dissector_key_enc_opts *enc_opts)
{
	struct nlattr *nest;
	int err;

	if (!enc_opts->len)
		return 0;

	nest = nla_nest_start_noflag(skb, enc_opt_type);
	if (!nest)
		goto nla_put_failure;

	switch (enc_opts->dst_opt_type) {
	case TUNNEL_GENEVE_OPT:
		err = fl_dump_key_geneve_opt(skb, enc_opts);
		if (err)
			goto nla_put_failure;
		break;
	default:
		goto nla_put_failure;
	}
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int fl_dump_key_enc_opt(struct sk_buff *skb,
			       struct flow_dissector_key_enc_opts *key_opts,
			       struct flow_dissector_key_enc_opts *msk_opts)
{
	int err;

	err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts);
	if (err)
		return err;

	return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts);
}

static int fl_dump_key(struct sk_buff *skb, struct net *net,
		       struct fl_flow_key *key, struct fl_flow_key *mask)
{
	if (mask->meta.ingress_ifindex) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, key->meta.ingress_ifindex);
		if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name))
			goto nla_put_failure;
	}

	if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST,
			    mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK,
			    sizeof(key->eth.dst)) ||
	    fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
			    mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
			    sizeof(key->eth.src)) ||
	    fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
			    sizeof(key->basic.n_proto)))
		goto nla_put_failure;

	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
		goto nla_put_failure;

	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID,
			     TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan))
		goto nla_put_failure;

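	/* For QinQ matches, also dump the customer (inner) VLAN tag, followed
	 * by the ethertype attributes describing what each VLAN tag carries.
	 */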
	if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID,
			     TCA_FLOWER_KEY_CVLAN_PRIO,
			     &key->cvlan, &mask->cvlan) ||
	    (mask->cvlan.vlan_tpid &&
	     nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
			  key->cvlan.vlan_tpid)))
		goto nla_put_failure;

	if (mask->basic.n_proto) {
		if (mask->cvlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		} else if (mask->vlan.vlan_tpid) {
			if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
					 key->basic.n_proto))
				goto nla_put_failure;
		}
	}

	if ((key->basic.n_proto == htons(ETH_P_IP) ||
	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
	    (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
			     &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
			     sizeof(key->basic.ip_proto)) ||
	     fl_dump_key_ip(skb, false, &key->ip, &mask->ip)))
		goto nla_put_failure;

	if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
			     &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
			     sizeof(key->ipv4.src)) ||
	     fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
			     &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
			     sizeof(key->ipv4.dst))))
		goto nla_put_failure;
	else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
				  &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
				  sizeof(key->ipv6.src)) ||
		  fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST,
				  &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
				  sizeof(key->ipv6.dst))))
		goto nla_put_failure;

	if (key->basic.ip_proto == IPPROTO_TCP &&
	    (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
			     &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK,
			     sizeof(key->tp.src)) ||
	     fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST,
			     &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK,
			     sizeof(key->tp.dst)) ||
	     fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS,
			     &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK,
			     sizeof(key->tcp.flags))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_UDP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.ip_proto == IPPROTO_SCTP &&
		 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC,
				  &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK,
				  sizeof(key->tp.src)) ||
		  fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST,
				  &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK,
				  sizeof(key->tp.dst))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IP) &&
		 key->basic.ip_proto == IPPROTO_ICMP &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if (key->basic.n_proto == htons(ETH_P_IPV6) &&
		 key->basic.ip_proto == IPPROTO_ICMPV6 &&
		 (fl_dump_key_val(skb, &key->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type,
				  TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,
				  sizeof(key->icmp.type)) ||
		  fl_dump_key_val(skb, &key->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code,
				  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
				  sizeof(key->icmp.code))))
		goto nla_put_failure;
	else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
		  key->basic.n_proto == htons(ETH_P_RARP)) &&
		 (fl_dump_key_val(skb, &key->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
				  TCA_FLOWER_KEY_ARP_SIP_MASK,
				  sizeof(key->arp.sip)) ||
		  fl_dump_key_val(skb, &key->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
				  TCA_FLOWER_KEY_ARP_TIP_MASK,
				  sizeof(key->arp.tip)) ||
		  fl_dump_key_val(skb, &key->arp.op,
				  TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
				  TCA_FLOWER_KEY_ARP_OP_MASK,
				  sizeof(key->arp.op)) ||
		  fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
				  mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
				  sizeof(key->arp.sha)) ||
		  fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
				  mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
				  sizeof(key->arp.tha))))
		goto nla_put_failure;

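	/* Port ranges are emitted in addition to any exact L4 port keys above
	 * and only apply to TCP, UDP and SCTP flows.
	 */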
	if ((key->basic.ip_proto == IPPROTO_TCP ||
	     key->basic.ip_proto == IPPROTO_UDP ||
	     key->basic.ip_proto == IPPROTO_SCTP) &&
	    fl_dump_key_port_range(skb, key, mask))
		goto nla_put_failure;

	if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
	    (fl_dump_key_val(skb, &key->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
			     TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
			     sizeof(key->enc_ipv4.src)) ||
	     fl_dump_key_val(skb, &key->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst,
			     TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,
			     sizeof(key->enc_ipv4.dst))))
		goto nla_put_failure;
	else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS &&
		 (fl_dump_key_val(skb, &key->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src,
				  TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
				  sizeof(key->enc_ipv6.src)) ||
		  fl_dump_key_val(skb, &key->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST,
				  &mask->enc_ipv6.dst,
				  TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,
				  sizeof(key->enc_ipv6.dst))))
		goto nla_put_failure;

	if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID,
			    &mask->enc_key_id, TCA_FLOWER_UNSPEC,
			    sizeof(key->enc_key_id)) ||
	    fl_dump_key_val(skb, &key->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,
			    &mask->enc_tp.src,
			    TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,
			    sizeof(key->enc_tp.src)) ||
	    fl_dump_key_val(skb, &key->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT,
			    &mask->enc_tp.dst,
			    TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,
			    sizeof(key->enc_tp.dst)) ||
	    fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) ||
	    fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts))
		goto nla_put_failure;

	if (fl_dump_key_ct(skb, &key->ct, &mask->ct))
		goto nla_put_failure;

	if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

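/* Dump a single filter to user space. The classid, flow key and flags are
 * read under tp->lock so they stay consistent with concurrent updates;
 * hardware stats and actions are dumped after the lock is dropped.
 */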
static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_fl_filter *f = fh;
	struct nlattr *nest;
	struct fl_flow_key *key, *mask;
	bool skip_hw;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	spin_lock(&tp->lock);

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
		goto nla_put_failure_locked;

	key = &f->key;
	mask = &f->mask->key;
	skip_hw = tc_skip_hw(f->flags);

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure_locked;

	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
		goto nla_put_failure_locked;

	spin_unlock(&tp->lock);

	if (!skip_hw)
		fl_hw_update_stats(tp, f, rtnl_held);

	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure_locked:
	spin_unlock(&tp->lock);
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv)
{
	struct fl_flow_tmplt *tmplt = tmplt_priv;
	struct fl_flow_key *key, *mask;
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	key = &tmplt->dummy_key;
	mask = &tmplt->mask;

	if (fl_dump_key(skb, net, key, mask))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void fl_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_fl_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}

static struct tcf_proto_ops cls_fl_ops __read_mostly = {
	.kind		= "flower",
	.classify	= fl_classify,
	.init		= fl_init,
	.destroy	= fl_destroy,
	.get		= fl_get,
	.put		= fl_put,
	.change		= fl_change,
	.delete		= fl_delete,
	.walk		= fl_walk,
	.reoffload	= fl_reoffload,
	.dump		= fl_dump,
	.bind_class	= fl_bind_class,
	.tmplt_create	= fl_tmplt_create,
	.tmplt_destroy	= fl_tmplt_destroy,
	.tmplt_dump	= fl_tmplt_dump,
	.owner		= THIS_MODULE,
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
};

static int __init cls_fl_init(void)
{
	return register_tcf_proto_ops(&cls_fl_ops);
}

static void __exit cls_fl_exit(void)
{
	unregister_tcf_proto_ops(&cls_fl_ops);
}

module_init(cls_fl_init);
module_exit(cls_fl_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("Flower classifier");
MODULE_LICENSE("GPL v2");