1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_flower.c Flower classifier 4 * 5 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us> 6 */ 7 8 #include <linux/kernel.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/rhashtable.h> 12 #include <linux/workqueue.h> 13 #include <linux/refcount.h> 14 15 #include <linux/if_ether.h> 16 #include <linux/in6.h> 17 #include <linux/ip.h> 18 #include <linux/mpls.h> 19 20 #include <net/sch_generic.h> 21 #include <net/pkt_cls.h> 22 #include <net/ip.h> 23 #include <net/flow_dissector.h> 24 #include <net/geneve.h> 25 #include <net/vxlan.h> 26 #include <net/erspan.h> 27 28 #include <net/dst.h> 29 #include <net/dst_metadata.h> 30 31 #include <uapi/linux/netfilter/nf_conntrack_common.h> 32 33 struct fl_flow_key { 34 struct flow_dissector_key_meta meta; 35 struct flow_dissector_key_control control; 36 struct flow_dissector_key_control enc_control; 37 struct flow_dissector_key_basic basic; 38 struct flow_dissector_key_eth_addrs eth; 39 struct flow_dissector_key_vlan vlan; 40 struct flow_dissector_key_vlan cvlan; 41 union { 42 struct flow_dissector_key_ipv4_addrs ipv4; 43 struct flow_dissector_key_ipv6_addrs ipv6; 44 }; 45 struct flow_dissector_key_ports tp; 46 struct flow_dissector_key_icmp icmp; 47 struct flow_dissector_key_arp arp; 48 struct flow_dissector_key_keyid enc_key_id; 49 union { 50 struct flow_dissector_key_ipv4_addrs enc_ipv4; 51 struct flow_dissector_key_ipv6_addrs enc_ipv6; 52 }; 53 struct flow_dissector_key_ports enc_tp; 54 struct flow_dissector_key_mpls mpls; 55 struct flow_dissector_key_tcp tcp; 56 struct flow_dissector_key_ip ip; 57 struct flow_dissector_key_ip enc_ip; 58 struct flow_dissector_key_enc_opts enc_opts; 59 union { 60 struct flow_dissector_key_ports tp; 61 struct { 62 struct flow_dissector_key_ports tp_min; 63 struct flow_dissector_key_ports tp_max; 64 }; 65 } tp_range; 66 struct flow_dissector_key_ct ct; 67 struct flow_dissector_key_hash hash; 68 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ 69 70 struct fl_flow_mask_range { 71 unsigned short int start; 72 unsigned short int end; 73 }; 74 75 struct fl_flow_mask { 76 struct fl_flow_key key; 77 struct fl_flow_mask_range range; 78 u32 flags; 79 struct rhash_head ht_node; 80 struct rhashtable ht; 81 struct rhashtable_params filter_ht_params; 82 struct flow_dissector dissector; 83 struct list_head filters; 84 struct rcu_work rwork; 85 struct list_head list; 86 refcount_t refcnt; 87 }; 88 89 struct fl_flow_tmplt { 90 struct fl_flow_key dummy_key; 91 struct fl_flow_key mask; 92 struct flow_dissector dissector; 93 struct tcf_chain *chain; 94 }; 95 96 struct cls_fl_head { 97 struct rhashtable ht; 98 spinlock_t masks_lock; /* Protect masks list */ 99 struct list_head masks; 100 struct list_head hw_filters; 101 struct rcu_work rwork; 102 struct idr handle_idr; 103 }; 104 105 struct cls_fl_filter { 106 struct fl_flow_mask *mask; 107 struct rhash_head ht_node; 108 struct fl_flow_key mkey; 109 struct tcf_exts exts; 110 struct tcf_result res; 111 struct fl_flow_key key; 112 struct list_head list; 113 struct list_head hw_list; 114 u32 handle; 115 u32 flags; 116 u32 in_hw_count; 117 struct rcu_work rwork; 118 struct net_device *hw_dev; 119 /* Flower classifier is unlocked, which means that its reference counter 120 * can be changed concurrently without any kind of external 121 * synchronization. Use atomic reference counter to be concurrency-safe. 122 */ 123 refcount_t refcnt; 124 bool deleted; 125 }; 126 127 static const struct rhashtable_params mask_ht_params = { 128 .key_offset = offsetof(struct fl_flow_mask, key), 129 .key_len = sizeof(struct fl_flow_key), 130 .head_offset = offsetof(struct fl_flow_mask, ht_node), 131 .automatic_shrinking = true, 132 }; 133 134 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) 135 { 136 return mask->range.end - mask->range.start; 137 } 138 139 static void fl_mask_update_range(struct fl_flow_mask *mask) 140 { 141 const u8 *bytes = (const u8 *) &mask->key; 142 size_t size = sizeof(mask->key); 143 size_t i, first = 0, last; 144 145 for (i = 0; i < size; i++) { 146 if (bytes[i]) { 147 first = i; 148 break; 149 } 150 } 151 last = first; 152 for (i = size - 1; i != first; i--) { 153 if (bytes[i]) { 154 last = i; 155 break; 156 } 157 } 158 mask->range.start = rounddown(first, sizeof(long)); 159 mask->range.end = roundup(last + 1, sizeof(long)); 160 } 161 162 static void *fl_key_get_start(struct fl_flow_key *key, 163 const struct fl_flow_mask *mask) 164 { 165 return (u8 *) key + mask->range.start; 166 } 167 168 static void fl_set_masked_key(struct fl_flow_key *mkey, struct fl_flow_key *key, 169 struct fl_flow_mask *mask) 170 { 171 const long *lkey = fl_key_get_start(key, mask); 172 const long *lmask = fl_key_get_start(&mask->key, mask); 173 long *lmkey = fl_key_get_start(mkey, mask); 174 int i; 175 176 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) 177 *lmkey++ = *lkey++ & *lmask++; 178 } 179 180 static bool fl_mask_fits_tmplt(struct fl_flow_tmplt *tmplt, 181 struct fl_flow_mask *mask) 182 { 183 const long *lmask = fl_key_get_start(&mask->key, mask); 184 const long *ltmplt; 185 int i; 186 187 if (!tmplt) 188 return true; 189 ltmplt = fl_key_get_start(&tmplt->mask, mask); 190 for (i = 0; i < fl_mask_range(mask); i += sizeof(long)) { 191 if (~*ltmplt++ & *lmask++) 192 return false; 193 } 194 return true; 195 } 196 197 static void fl_clear_masked_range(struct fl_flow_key *key, 198 struct fl_flow_mask *mask) 199 { 200 memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask)); 201 } 202 203 static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter, 204 struct fl_flow_key *key, 205 struct fl_flow_key *mkey) 206 { 207 __be16 min_mask, max_mask, min_val, max_val; 208 209 min_mask = htons(filter->mask->key.tp_range.tp_min.dst); 210 max_mask = htons(filter->mask->key.tp_range.tp_max.dst); 211 min_val = htons(filter->key.tp_range.tp_min.dst); 212 max_val = htons(filter->key.tp_range.tp_max.dst); 213 214 if (min_mask && max_mask) { 215 if (htons(key->tp_range.tp.dst) < min_val || 216 htons(key->tp_range.tp.dst) > max_val) 217 return false; 218 219 /* skb does not have min and max values */ 220 mkey->tp_range.tp_min.dst = filter->mkey.tp_range.tp_min.dst; 221 mkey->tp_range.tp_max.dst = filter->mkey.tp_range.tp_max.dst; 222 } 223 return true; 224 } 225 226 static bool fl_range_port_src_cmp(struct cls_fl_filter *filter, 227 struct fl_flow_key *key, 228 struct fl_flow_key *mkey) 229 { 230 __be16 min_mask, max_mask, min_val, max_val; 231 232 min_mask = htons(filter->mask->key.tp_range.tp_min.src); 233 max_mask = htons(filter->mask->key.tp_range.tp_max.src); 234 min_val = htons(filter->key.tp_range.tp_min.src); 235 max_val = htons(filter->key.tp_range.tp_max.src); 236 237 if (min_mask && max_mask) { 238 if (htons(key->tp_range.tp.src) < min_val || 239 htons(key->tp_range.tp.src) > max_val) 240 return false; 241 242 /* skb does not have min and max values */ 243 mkey->tp_range.tp_min.src = filter->mkey.tp_range.tp_min.src; 244 mkey->tp_range.tp_max.src = filter->mkey.tp_range.tp_max.src; 245 } 246 return true; 247 } 248 249 static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask, 250 struct fl_flow_key *mkey) 251 { 252 return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask), 253 mask->filter_ht_params); 254 } 255 256 static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask, 257 struct fl_flow_key *mkey, 258 struct fl_flow_key *key) 259 { 260 struct cls_fl_filter *filter, *f; 261 262 list_for_each_entry_rcu(filter, &mask->filters, list) { 263 if (!fl_range_port_dst_cmp(filter, key, mkey)) 264 continue; 265 266 if (!fl_range_port_src_cmp(filter, key, mkey)) 267 continue; 268 269 f = __fl_lookup(mask, mkey); 270 if (f) 271 return f; 272 } 273 return NULL; 274 } 275 276 static noinline_for_stack 277 struct cls_fl_filter *fl_mask_lookup(struct fl_flow_mask *mask, struct fl_flow_key *key) 278 { 279 struct fl_flow_key mkey; 280 281 fl_set_masked_key(&mkey, key, mask); 282 if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE)) 283 return fl_lookup_range(mask, &mkey, key); 284 285 return __fl_lookup(mask, &mkey); 286 } 287 288 static u16 fl_ct_info_to_flower_map[] = { 289 [IP_CT_ESTABLISHED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 290 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 291 [IP_CT_RELATED] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 292 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 293 [IP_CT_ESTABLISHED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 294 TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED, 295 [IP_CT_RELATED_REPLY] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 296 TCA_FLOWER_KEY_CT_FLAGS_RELATED, 297 [IP_CT_NEW] = TCA_FLOWER_KEY_CT_FLAGS_TRACKED | 298 TCA_FLOWER_KEY_CT_FLAGS_NEW, 299 }; 300 301 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, 302 struct tcf_result *res) 303 { 304 struct cls_fl_head *head = rcu_dereference_bh(tp->root); 305 struct fl_flow_key skb_key; 306 struct fl_flow_mask *mask; 307 struct cls_fl_filter *f; 308 309 list_for_each_entry_rcu(mask, &head->masks, list) { 310 flow_dissector_init_keys(&skb_key.control, &skb_key.basic); 311 fl_clear_masked_range(&skb_key, mask); 312 313 skb_flow_dissect_meta(skb, &mask->dissector, &skb_key); 314 /* skb_flow_dissect() does not set n_proto in case an unknown 315 * protocol, so do it rather here. 316 */ 317 skb_key.basic.n_proto = skb_protocol(skb, false); 318 skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key); 319 skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, 320 fl_ct_info_to_flower_map, 321 ARRAY_SIZE(fl_ct_info_to_flower_map)); 322 skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); 323 skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); 324 325 f = fl_mask_lookup(mask, &skb_key); 326 if (f && !tc_skip_sw(f->flags)) { 327 *res = f->res; 328 return tcf_exts_exec(skb, &f->exts, res); 329 } 330 } 331 return -1; 332 } 333 334 static int fl_init(struct tcf_proto *tp) 335 { 336 struct cls_fl_head *head; 337 338 head = kzalloc(sizeof(*head), GFP_KERNEL); 339 if (!head) 340 return -ENOBUFS; 341 342 spin_lock_init(&head->masks_lock); 343 INIT_LIST_HEAD_RCU(&head->masks); 344 INIT_LIST_HEAD(&head->hw_filters); 345 rcu_assign_pointer(tp->root, head); 346 idr_init(&head->handle_idr); 347 348 return rhashtable_init(&head->ht, &mask_ht_params); 349 } 350 351 static void fl_mask_free(struct fl_flow_mask *mask, bool mask_init_done) 352 { 353 /* temporary masks don't have their filters list and ht initialized */ 354 if (mask_init_done) { 355 WARN_ON(!list_empty(&mask->filters)); 356 rhashtable_destroy(&mask->ht); 357 } 358 kfree(mask); 359 } 360 361 static void fl_mask_free_work(struct work_struct *work) 362 { 363 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 364 struct fl_flow_mask, rwork); 365 366 fl_mask_free(mask, true); 367 } 368 369 static void fl_uninit_mask_free_work(struct work_struct *work) 370 { 371 struct fl_flow_mask *mask = container_of(to_rcu_work(work), 372 struct fl_flow_mask, rwork); 373 374 fl_mask_free(mask, false); 375 } 376 377 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask) 378 { 379 if (!refcount_dec_and_test(&mask->refcnt)) 380 return false; 381 382 rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params); 383 384 spin_lock(&head->masks_lock); 385 list_del_rcu(&mask->list); 386 spin_unlock(&head->masks_lock); 387 388 tcf_queue_work(&mask->rwork, fl_mask_free_work); 389 390 return true; 391 } 392 393 static struct cls_fl_head *fl_head_dereference(struct tcf_proto *tp) 394 { 395 /* Flower classifier only changes root pointer during init and destroy. 396 * Users must obtain reference to tcf_proto instance before calling its 397 * API, so tp->root pointer is protected from concurrent call to 398 * fl_destroy() by reference counting. 399 */ 400 return rcu_dereference_raw(tp->root); 401 } 402 403 static void __fl_destroy_filter(struct cls_fl_filter *f) 404 { 405 tcf_exts_destroy(&f->exts); 406 tcf_exts_put_net(&f->exts); 407 kfree(f); 408 } 409 410 static void fl_destroy_filter_work(struct work_struct *work) 411 { 412 struct cls_fl_filter *f = container_of(to_rcu_work(work), 413 struct cls_fl_filter, rwork); 414 415 __fl_destroy_filter(f); 416 } 417 418 static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f, 419 bool rtnl_held, struct netlink_ext_ack *extack) 420 { 421 struct tcf_block *block = tp->chain->block; 422 struct flow_cls_offload cls_flower = {}; 423 424 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 425 cls_flower.command = FLOW_CLS_DESTROY; 426 cls_flower.cookie = (unsigned long) f; 427 428 tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, false, 429 &f->flags, &f->in_hw_count, rtnl_held); 430 431 } 432 433 static int fl_hw_replace_filter(struct tcf_proto *tp, 434 struct cls_fl_filter *f, bool rtnl_held, 435 struct netlink_ext_ack *extack) 436 { 437 struct tcf_block *block = tp->chain->block; 438 struct flow_cls_offload cls_flower = {}; 439 bool skip_sw = tc_skip_sw(f->flags); 440 int err = 0; 441 442 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 443 if (!cls_flower.rule) 444 return -ENOMEM; 445 446 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack); 447 cls_flower.command = FLOW_CLS_REPLACE; 448 cls_flower.cookie = (unsigned long) f; 449 cls_flower.rule->match.dissector = &f->mask->dissector; 450 cls_flower.rule->match.mask = &f->mask->key; 451 cls_flower.rule->match.key = &f->mkey; 452 cls_flower.classid = f->res.classid; 453 454 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); 455 if (err) { 456 kfree(cls_flower.rule); 457 if (skip_sw) { 458 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); 459 return err; 460 } 461 return 0; 462 } 463 464 err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower, 465 skip_sw, &f->flags, &f->in_hw_count, rtnl_held); 466 tc_cleanup_flow_action(&cls_flower.rule->action); 467 kfree(cls_flower.rule); 468 469 if (err) { 470 fl_hw_destroy_filter(tp, f, rtnl_held, NULL); 471 return err; 472 } 473 474 if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) 475 return -EINVAL; 476 477 return 0; 478 } 479 480 static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, 481 bool rtnl_held) 482 { 483 struct tcf_block *block = tp->chain->block; 484 struct flow_cls_offload cls_flower = {}; 485 486 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, NULL); 487 cls_flower.command = FLOW_CLS_STATS; 488 cls_flower.cookie = (unsigned long) f; 489 cls_flower.classid = f->res.classid; 490 491 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, 492 rtnl_held); 493 494 tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, 495 cls_flower.stats.pkts, 496 cls_flower.stats.drops, 497 cls_flower.stats.lastused, 498 cls_flower.stats.used_hw_stats, 499 cls_flower.stats.used_hw_stats_valid); 500 } 501 502 static void __fl_put(struct cls_fl_filter *f) 503 { 504 if (!refcount_dec_and_test(&f->refcnt)) 505 return; 506 507 if (tcf_exts_get_net(&f->exts)) 508 tcf_queue_work(&f->rwork, fl_destroy_filter_work); 509 else 510 __fl_destroy_filter(f); 511 } 512 513 static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle) 514 { 515 struct cls_fl_filter *f; 516 517 rcu_read_lock(); 518 f = idr_find(&head->handle_idr, handle); 519 if (f && !refcount_inc_not_zero(&f->refcnt)) 520 f = NULL; 521 rcu_read_unlock(); 522 523 return f; 524 } 525 526 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f, 527 bool *last, bool rtnl_held, 528 struct netlink_ext_ack *extack) 529 { 530 struct cls_fl_head *head = fl_head_dereference(tp); 531 532 *last = false; 533 534 spin_lock(&tp->lock); 535 if (f->deleted) { 536 spin_unlock(&tp->lock); 537 return -ENOENT; 538 } 539 540 f->deleted = true; 541 rhashtable_remove_fast(&f->mask->ht, &f->ht_node, 542 f->mask->filter_ht_params); 543 idr_remove(&head->handle_idr, f->handle); 544 list_del_rcu(&f->list); 545 spin_unlock(&tp->lock); 546 547 *last = fl_mask_put(head, f->mask); 548 if (!tc_skip_hw(f->flags)) 549 fl_hw_destroy_filter(tp, f, rtnl_held, extack); 550 tcf_unbind_filter(tp, &f->res); 551 __fl_put(f); 552 553 return 0; 554 } 555 556 static void fl_destroy_sleepable(struct work_struct *work) 557 { 558 struct cls_fl_head *head = container_of(to_rcu_work(work), 559 struct cls_fl_head, 560 rwork); 561 562 rhashtable_destroy(&head->ht); 563 kfree(head); 564 module_put(THIS_MODULE); 565 } 566 567 static void fl_destroy(struct tcf_proto *tp, bool rtnl_held, 568 struct netlink_ext_ack *extack) 569 { 570 struct cls_fl_head *head = fl_head_dereference(tp); 571 struct fl_flow_mask *mask, *next_mask; 572 struct cls_fl_filter *f, *next; 573 bool last; 574 575 list_for_each_entry_safe(mask, next_mask, &head->masks, list) { 576 list_for_each_entry_safe(f, next, &mask->filters, list) { 577 __fl_delete(tp, f, &last, rtnl_held, extack); 578 if (last) 579 break; 580 } 581 } 582 idr_destroy(&head->handle_idr); 583 584 __module_get(THIS_MODULE); 585 tcf_queue_work(&head->rwork, fl_destroy_sleepable); 586 } 587 588 static void fl_put(struct tcf_proto *tp, void *arg) 589 { 590 struct cls_fl_filter *f = arg; 591 592 __fl_put(f); 593 } 594 595 static void *fl_get(struct tcf_proto *tp, u32 handle) 596 { 597 struct cls_fl_head *head = fl_head_dereference(tp); 598 599 return __fl_get(head, handle); 600 } 601 602 static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { 603 [TCA_FLOWER_UNSPEC] = { .type = NLA_UNSPEC }, 604 [TCA_FLOWER_CLASSID] = { .type = NLA_U32 }, 605 [TCA_FLOWER_INDEV] = { .type = NLA_STRING, 606 .len = IFNAMSIZ }, 607 [TCA_FLOWER_KEY_ETH_DST] = { .len = ETH_ALEN }, 608 [TCA_FLOWER_KEY_ETH_DST_MASK] = { .len = ETH_ALEN }, 609 [TCA_FLOWER_KEY_ETH_SRC] = { .len = ETH_ALEN }, 610 [TCA_FLOWER_KEY_ETH_SRC_MASK] = { .len = ETH_ALEN }, 611 [TCA_FLOWER_KEY_ETH_TYPE] = { .type = NLA_U16 }, 612 [TCA_FLOWER_KEY_IP_PROTO] = { .type = NLA_U8 }, 613 [TCA_FLOWER_KEY_IPV4_SRC] = { .type = NLA_U32 }, 614 [TCA_FLOWER_KEY_IPV4_SRC_MASK] = { .type = NLA_U32 }, 615 [TCA_FLOWER_KEY_IPV4_DST] = { .type = NLA_U32 }, 616 [TCA_FLOWER_KEY_IPV4_DST_MASK] = { .type = NLA_U32 }, 617 [TCA_FLOWER_KEY_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 618 [TCA_FLOWER_KEY_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 619 [TCA_FLOWER_KEY_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 620 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 621 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 622 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 623 [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 624 [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 625 [TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 }, 626 [TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 }, 627 [TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 }, 628 [TCA_FLOWER_KEY_ENC_KEY_ID] = { .type = NLA_U32 }, 629 [TCA_FLOWER_KEY_ENC_IPV4_SRC] = { .type = NLA_U32 }, 630 [TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK] = { .type = NLA_U32 }, 631 [TCA_FLOWER_KEY_ENC_IPV4_DST] = { .type = NLA_U32 }, 632 [TCA_FLOWER_KEY_ENC_IPV4_DST_MASK] = { .type = NLA_U32 }, 633 [TCA_FLOWER_KEY_ENC_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, 634 [TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK] = { .len = sizeof(struct in6_addr) }, 635 [TCA_FLOWER_KEY_ENC_IPV6_DST] = { .len = sizeof(struct in6_addr) }, 636 [TCA_FLOWER_KEY_ENC_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 637 [TCA_FLOWER_KEY_TCP_SRC_MASK] = { .type = NLA_U16 }, 638 [TCA_FLOWER_KEY_TCP_DST_MASK] = { .type = NLA_U16 }, 639 [TCA_FLOWER_KEY_UDP_SRC_MASK] = { .type = NLA_U16 }, 640 [TCA_FLOWER_KEY_UDP_DST_MASK] = { .type = NLA_U16 }, 641 [TCA_FLOWER_KEY_SCTP_SRC_MASK] = { .type = NLA_U16 }, 642 [TCA_FLOWER_KEY_SCTP_DST_MASK] = { .type = NLA_U16 }, 643 [TCA_FLOWER_KEY_SCTP_SRC] = { .type = NLA_U16 }, 644 [TCA_FLOWER_KEY_SCTP_DST] = { .type = NLA_U16 }, 645 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT] = { .type = NLA_U16 }, 646 [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, 647 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, 648 [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, 649 [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, 650 [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, 651 [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, 652 [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, 653 [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, 654 [TCA_FLOWER_KEY_ICMPV4_CODE_MASK] = { .type = NLA_U8 }, 655 [TCA_FLOWER_KEY_ICMPV6_TYPE] = { .type = NLA_U8 }, 656 [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 }, 657 [TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 }, 658 [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 }, 659 [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 }, 660 [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 }, 661 [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 }, 662 [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 }, 663 [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 }, 664 [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 }, 665 [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN }, 666 [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN }, 667 [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN }, 668 [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN }, 669 [TCA_FLOWER_KEY_MPLS_TTL] = { .type = NLA_U8 }, 670 [TCA_FLOWER_KEY_MPLS_BOS] = { .type = NLA_U8 }, 671 [TCA_FLOWER_KEY_MPLS_TC] = { .type = NLA_U8 }, 672 [TCA_FLOWER_KEY_MPLS_LABEL] = { .type = NLA_U32 }, 673 [TCA_FLOWER_KEY_MPLS_OPTS] = { .type = NLA_NESTED }, 674 [TCA_FLOWER_KEY_TCP_FLAGS] = { .type = NLA_U16 }, 675 [TCA_FLOWER_KEY_TCP_FLAGS_MASK] = { .type = NLA_U16 }, 676 [TCA_FLOWER_KEY_IP_TOS] = { .type = NLA_U8 }, 677 [TCA_FLOWER_KEY_IP_TOS_MASK] = { .type = NLA_U8 }, 678 [TCA_FLOWER_KEY_IP_TTL] = { .type = NLA_U8 }, 679 [TCA_FLOWER_KEY_IP_TTL_MASK] = { .type = NLA_U8 }, 680 [TCA_FLOWER_KEY_CVLAN_ID] = { .type = NLA_U16 }, 681 [TCA_FLOWER_KEY_CVLAN_PRIO] = { .type = NLA_U8 }, 682 [TCA_FLOWER_KEY_CVLAN_ETH_TYPE] = { .type = NLA_U16 }, 683 [TCA_FLOWER_KEY_ENC_IP_TOS] = { .type = NLA_U8 }, 684 [TCA_FLOWER_KEY_ENC_IP_TOS_MASK] = { .type = NLA_U8 }, 685 [TCA_FLOWER_KEY_ENC_IP_TTL] = { .type = NLA_U8 }, 686 [TCA_FLOWER_KEY_ENC_IP_TTL_MASK] = { .type = NLA_U8 }, 687 [TCA_FLOWER_KEY_ENC_OPTS] = { .type = NLA_NESTED }, 688 [TCA_FLOWER_KEY_ENC_OPTS_MASK] = { .type = NLA_NESTED }, 689 [TCA_FLOWER_KEY_CT_STATE] = { .type = NLA_U16 }, 690 [TCA_FLOWER_KEY_CT_STATE_MASK] = { .type = NLA_U16 }, 691 [TCA_FLOWER_KEY_CT_ZONE] = { .type = NLA_U16 }, 692 [TCA_FLOWER_KEY_CT_ZONE_MASK] = { .type = NLA_U16 }, 693 [TCA_FLOWER_KEY_CT_MARK] = { .type = NLA_U32 }, 694 [TCA_FLOWER_KEY_CT_MARK_MASK] = { .type = NLA_U32 }, 695 [TCA_FLOWER_KEY_CT_LABELS] = { .type = NLA_BINARY, 696 .len = 128 / BITS_PER_BYTE }, 697 [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, 698 .len = 128 / BITS_PER_BYTE }, 699 [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, 700 [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, 701 [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, 702 703 }; 704 705 static const struct nla_policy 706 enc_opts_policy[TCA_FLOWER_KEY_ENC_OPTS_MAX + 1] = { 707 [TCA_FLOWER_KEY_ENC_OPTS_UNSPEC] = { 708 .strict_start_type = TCA_FLOWER_KEY_ENC_OPTS_VXLAN }, 709 [TCA_FLOWER_KEY_ENC_OPTS_GENEVE] = { .type = NLA_NESTED }, 710 [TCA_FLOWER_KEY_ENC_OPTS_VXLAN] = { .type = NLA_NESTED }, 711 [TCA_FLOWER_KEY_ENC_OPTS_ERSPAN] = { .type = NLA_NESTED }, 712 }; 713 714 static const struct nla_policy 715 geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { 716 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, 717 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, 718 [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, 719 .len = 128 }, 720 }; 721 722 static const struct nla_policy 723 vxlan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1] = { 724 [TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP] = { .type = NLA_U32 }, 725 }; 726 727 static const struct nla_policy 728 erspan_opt_policy[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1] = { 729 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER] = { .type = NLA_U8 }, 730 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX] = { .type = NLA_U32 }, 731 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] = { .type = NLA_U8 }, 732 [TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID] = { .type = NLA_U8 }, 733 }; 734 735 static const struct nla_policy 736 mpls_stack_entry_policy[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1] = { 737 [TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH] = { .type = NLA_U8 }, 738 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL] = { .type = NLA_U8 }, 739 [TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS] = { .type = NLA_U8 }, 740 [TCA_FLOWER_KEY_MPLS_OPT_LSE_TC] = { .type = NLA_U8 }, 741 [TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL] = { .type = NLA_U32 }, 742 }; 743 744 static void fl_set_key_val(struct nlattr **tb, 745 void *val, int val_type, 746 void *mask, int mask_type, int len) 747 { 748 if (!tb[val_type]) 749 return; 750 nla_memcpy(val, tb[val_type], len); 751 if (mask_type == TCA_FLOWER_UNSPEC || !tb[mask_type]) 752 memset(mask, 0xff, len); 753 else 754 nla_memcpy(mask, tb[mask_type], len); 755 } 756 757 static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key, 758 struct fl_flow_key *mask, 759 struct netlink_ext_ack *extack) 760 { 761 fl_set_key_val(tb, &key->tp_range.tp_min.dst, 762 TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_range.tp_min.dst, 763 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.dst)); 764 fl_set_key_val(tb, &key->tp_range.tp_max.dst, 765 TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_range.tp_max.dst, 766 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.dst)); 767 fl_set_key_val(tb, &key->tp_range.tp_min.src, 768 TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_range.tp_min.src, 769 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_min.src)); 770 fl_set_key_val(tb, &key->tp_range.tp_max.src, 771 TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_range.tp_max.src, 772 TCA_FLOWER_UNSPEC, sizeof(key->tp_range.tp_max.src)); 773 774 if (mask->tp_range.tp_min.dst && mask->tp_range.tp_max.dst && 775 htons(key->tp_range.tp_max.dst) <= 776 htons(key->tp_range.tp_min.dst)) { 777 NL_SET_ERR_MSG_ATTR(extack, 778 tb[TCA_FLOWER_KEY_PORT_DST_MIN], 779 "Invalid destination port range (min must be strictly smaller than max)"); 780 return -EINVAL; 781 } 782 if (mask->tp_range.tp_min.src && mask->tp_range.tp_max.src && 783 htons(key->tp_range.tp_max.src) <= 784 htons(key->tp_range.tp_min.src)) { 785 NL_SET_ERR_MSG_ATTR(extack, 786 tb[TCA_FLOWER_KEY_PORT_SRC_MIN], 787 "Invalid source port range (min must be strictly smaller than max)"); 788 return -EINVAL; 789 } 790 791 return 0; 792 } 793 794 static int fl_set_key_mpls_lse(const struct nlattr *nla_lse, 795 struct flow_dissector_key_mpls *key_val, 796 struct flow_dissector_key_mpls *key_mask, 797 struct netlink_ext_ack *extack) 798 { 799 struct nlattr *tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX + 1]; 800 struct flow_dissector_mpls_lse *lse_mask; 801 struct flow_dissector_mpls_lse *lse_val; 802 u8 lse_index; 803 u8 depth; 804 int err; 805 806 err = nla_parse_nested(tb, TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, nla_lse, 807 mpls_stack_entry_policy, extack); 808 if (err < 0) 809 return err; 810 811 if (!tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]) { 812 NL_SET_ERR_MSG(extack, "Missing MPLS option \"depth\""); 813 return -EINVAL; 814 } 815 816 depth = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH]); 817 818 /* LSE depth starts at 1, for consistency with terminology used by 819 * RFC 3031 (section 3.9), where depth 0 refers to unlabeled packets. 820 */ 821 if (depth < 1 || depth > FLOW_DIS_MPLS_MAX) { 822 NL_SET_ERR_MSG_ATTR(extack, 823 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH], 824 "Invalid MPLS depth"); 825 return -EINVAL; 826 } 827 lse_index = depth - 1; 828 829 dissector_set_mpls_lse(key_val, lse_index); 830 dissector_set_mpls_lse(key_mask, lse_index); 831 832 lse_val = &key_val->ls[lse_index]; 833 lse_mask = &key_mask->ls[lse_index]; 834 835 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]) { 836 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL]); 837 lse_mask->mpls_ttl = MPLS_TTL_MASK; 838 } 839 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]) { 840 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS]); 841 842 if (bos & ~MPLS_BOS_MASK) { 843 NL_SET_ERR_MSG_ATTR(extack, 844 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS], 845 "Bottom Of Stack (BOS) must be 0 or 1"); 846 return -EINVAL; 847 } 848 lse_val->mpls_bos = bos; 849 lse_mask->mpls_bos = MPLS_BOS_MASK; 850 } 851 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]) { 852 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC]); 853 854 if (tc & ~MPLS_TC_MASK) { 855 NL_SET_ERR_MSG_ATTR(extack, 856 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_TC], 857 "Traffic Class (TC) must be between 0 and 7"); 858 return -EINVAL; 859 } 860 lse_val->mpls_tc = tc; 861 lse_mask->mpls_tc = MPLS_TC_MASK; 862 } 863 if (tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]) { 864 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL]); 865 866 if (label & ~MPLS_LABEL_MASK) { 867 NL_SET_ERR_MSG_ATTR(extack, 868 tb[TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL], 869 "Label must be between 0 and 1048575"); 870 return -EINVAL; 871 } 872 lse_val->mpls_label = label; 873 lse_mask->mpls_label = MPLS_LABEL_MASK; 874 } 875 876 return 0; 877 } 878 879 static int fl_set_key_mpls_opts(const struct nlattr *nla_mpls_opts, 880 struct flow_dissector_key_mpls *key_val, 881 struct flow_dissector_key_mpls *key_mask, 882 struct netlink_ext_ack *extack) 883 { 884 struct nlattr *nla_lse; 885 int rem; 886 int err; 887 888 if (!(nla_mpls_opts->nla_type & NLA_F_NESTED)) { 889 NL_SET_ERR_MSG_ATTR(extack, nla_mpls_opts, 890 "NLA_F_NESTED is missing"); 891 return -EINVAL; 892 } 893 894 nla_for_each_nested(nla_lse, nla_mpls_opts, rem) { 895 if (nla_type(nla_lse) != TCA_FLOWER_KEY_MPLS_OPTS_LSE) { 896 NL_SET_ERR_MSG_ATTR(extack, nla_lse, 897 "Invalid MPLS option type"); 898 return -EINVAL; 899 } 900 901 err = fl_set_key_mpls_lse(nla_lse, key_val, key_mask, extack); 902 if (err < 0) 903 return err; 904 } 905 if (rem) { 906 NL_SET_ERR_MSG(extack, 907 "Bytes leftover after parsing MPLS options"); 908 return -EINVAL; 909 } 910 911 return 0; 912 } 913 914 static int fl_set_key_mpls(struct nlattr **tb, 915 struct flow_dissector_key_mpls *key_val, 916 struct flow_dissector_key_mpls *key_mask, 917 struct netlink_ext_ack *extack) 918 { 919 struct flow_dissector_mpls_lse *lse_mask; 920 struct flow_dissector_mpls_lse *lse_val; 921 922 if (tb[TCA_FLOWER_KEY_MPLS_OPTS]) { 923 if (tb[TCA_FLOWER_KEY_MPLS_TTL] || 924 tb[TCA_FLOWER_KEY_MPLS_BOS] || 925 tb[TCA_FLOWER_KEY_MPLS_TC] || 926 tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 927 NL_SET_ERR_MSG_ATTR(extack, 928 tb[TCA_FLOWER_KEY_MPLS_OPTS], 929 "MPLS label, Traffic Class, Bottom Of Stack and Time To Live must be encapsulated in the MPLS options attribute"); 930 return -EBADMSG; 931 } 932 933 return fl_set_key_mpls_opts(tb[TCA_FLOWER_KEY_MPLS_OPTS], 934 key_val, key_mask, extack); 935 } 936 937 lse_val = &key_val->ls[0]; 938 lse_mask = &key_mask->ls[0]; 939 940 if (tb[TCA_FLOWER_KEY_MPLS_TTL]) { 941 lse_val->mpls_ttl = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TTL]); 942 lse_mask->mpls_ttl = MPLS_TTL_MASK; 943 dissector_set_mpls_lse(key_val, 0); 944 dissector_set_mpls_lse(key_mask, 0); 945 } 946 if (tb[TCA_FLOWER_KEY_MPLS_BOS]) { 947 u8 bos = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_BOS]); 948 949 if (bos & ~MPLS_BOS_MASK) { 950 NL_SET_ERR_MSG_ATTR(extack, 951 tb[TCA_FLOWER_KEY_MPLS_BOS], 952 "Bottom Of Stack (BOS) must be 0 or 1"); 953 return -EINVAL; 954 } 955 lse_val->mpls_bos = bos; 956 lse_mask->mpls_bos = MPLS_BOS_MASK; 957 dissector_set_mpls_lse(key_val, 0); 958 dissector_set_mpls_lse(key_mask, 0); 959 } 960 if (tb[TCA_FLOWER_KEY_MPLS_TC]) { 961 u8 tc = nla_get_u8(tb[TCA_FLOWER_KEY_MPLS_TC]); 962 963 if (tc & ~MPLS_TC_MASK) { 964 NL_SET_ERR_MSG_ATTR(extack, 965 tb[TCA_FLOWER_KEY_MPLS_TC], 966 "Traffic Class (TC) must be between 0 and 7"); 967 return -EINVAL; 968 } 969 lse_val->mpls_tc = tc; 970 lse_mask->mpls_tc = MPLS_TC_MASK; 971 dissector_set_mpls_lse(key_val, 0); 972 dissector_set_mpls_lse(key_mask, 0); 973 } 974 if (tb[TCA_FLOWER_KEY_MPLS_LABEL]) { 975 u32 label = nla_get_u32(tb[TCA_FLOWER_KEY_MPLS_LABEL]); 976 977 if (label & ~MPLS_LABEL_MASK) { 978 NL_SET_ERR_MSG_ATTR(extack, 979 tb[TCA_FLOWER_KEY_MPLS_LABEL], 980 "Label must be between 0 and 1048575"); 981 return -EINVAL; 982 } 983 lse_val->mpls_label = label; 984 lse_mask->mpls_label = MPLS_LABEL_MASK; 985 dissector_set_mpls_lse(key_val, 0); 986 dissector_set_mpls_lse(key_mask, 0); 987 } 988 return 0; 989 } 990 991 static void fl_set_key_vlan(struct nlattr **tb, 992 __be16 ethertype, 993 int vlan_id_key, int vlan_prio_key, 994 struct flow_dissector_key_vlan *key_val, 995 struct flow_dissector_key_vlan *key_mask) 996 { 997 #define VLAN_PRIORITY_MASK 0x7 998 999 if (tb[vlan_id_key]) { 1000 key_val->vlan_id = 1001 nla_get_u16(tb[vlan_id_key]) & VLAN_VID_MASK; 1002 key_mask->vlan_id = VLAN_VID_MASK; 1003 } 1004 if (tb[vlan_prio_key]) { 1005 key_val->vlan_priority = 1006 nla_get_u8(tb[vlan_prio_key]) & 1007 VLAN_PRIORITY_MASK; 1008 key_mask->vlan_priority = VLAN_PRIORITY_MASK; 1009 } 1010 key_val->vlan_tpid = ethertype; 1011 key_mask->vlan_tpid = cpu_to_be16(~0); 1012 } 1013 1014 static void fl_set_key_flag(u32 flower_key, u32 flower_mask, 1015 u32 *dissector_key, u32 *dissector_mask, 1016 u32 flower_flag_bit, u32 dissector_flag_bit) 1017 { 1018 if (flower_mask & flower_flag_bit) { 1019 *dissector_mask |= dissector_flag_bit; 1020 if (flower_key & flower_flag_bit) 1021 *dissector_key |= dissector_flag_bit; 1022 } 1023 } 1024 1025 static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key, 1026 u32 *flags_mask, struct netlink_ext_ack *extack) 1027 { 1028 u32 key, mask; 1029 1030 /* mask is mandatory for flags */ 1031 if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) { 1032 NL_SET_ERR_MSG(extack, "Missing flags mask"); 1033 return -EINVAL; 1034 } 1035 1036 key = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS])); 1037 mask = be32_to_cpu(nla_get_u32(tb[TCA_FLOWER_KEY_FLAGS_MASK])); 1038 1039 *flags_key = 0; 1040 *flags_mask = 0; 1041 1042 fl_set_key_flag(key, mask, flags_key, flags_mask, 1043 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 1044 fl_set_key_flag(key, mask, flags_key, flags_mask, 1045 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 1046 FLOW_DIS_FIRST_FRAG); 1047 1048 return 0; 1049 } 1050 1051 static void fl_set_key_ip(struct nlattr **tb, bool encap, 1052 struct flow_dissector_key_ip *key, 1053 struct flow_dissector_key_ip *mask) 1054 { 1055 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 1056 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 1057 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 1058 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 1059 1060 fl_set_key_val(tb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)); 1061 fl_set_key_val(tb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl)); 1062 } 1063 1064 static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key, 1065 int depth, int option_len, 1066 struct netlink_ext_ack *extack) 1067 { 1068 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1]; 1069 struct nlattr *class = NULL, *type = NULL, *data = NULL; 1070 struct geneve_opt *opt; 1071 int err, data_len = 0; 1072 1073 if (option_len > sizeof(struct geneve_opt)) 1074 data_len = option_len - sizeof(struct geneve_opt); 1075 1076 opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len]; 1077 memset(opt, 0xff, option_len); 1078 opt->length = data_len / 4; 1079 opt->r1 = 0; 1080 opt->r2 = 0; 1081 opt->r3 = 0; 1082 1083 /* If no mask has been prodived we assume an exact match. */ 1084 if (!depth) 1085 return sizeof(struct geneve_opt) + data_len; 1086 1087 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_GENEVE) { 1088 NL_SET_ERR_MSG(extack, "Non-geneve option type for mask"); 1089 return -EINVAL; 1090 } 1091 1092 err = nla_parse_nested_deprecated(tb, 1093 TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX, 1094 nla, geneve_opt_policy, extack); 1095 if (err < 0) 1096 return err; 1097 1098 /* We are not allowed to omit any of CLASS, TYPE or DATA 1099 * fields from the key. 1100 */ 1101 if (!option_len && 1102 (!tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] || 1103 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] || 1104 !tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA])) { 1105 NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data"); 1106 return -EINVAL; 1107 } 1108 1109 /* Omitting any of CLASS, TYPE or DATA fields is allowed 1110 * for the mask. 1111 */ 1112 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]) { 1113 int new_len = key->enc_opts.len; 1114 1115 data = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA]; 1116 data_len = nla_len(data); 1117 if (data_len < 4) { 1118 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long"); 1119 return -ERANGE; 1120 } 1121 if (data_len % 4) { 1122 NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long"); 1123 return -ERANGE; 1124 } 1125 1126 new_len += sizeof(struct geneve_opt) + data_len; 1127 BUILD_BUG_ON(FLOW_DIS_TUN_OPTS_MAX != IP_TUNNEL_OPTS_MAX); 1128 if (new_len > FLOW_DIS_TUN_OPTS_MAX) { 1129 NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size"); 1130 return -ERANGE; 1131 } 1132 opt->length = data_len / 4; 1133 memcpy(opt->opt_data, nla_data(data), data_len); 1134 } 1135 1136 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]) { 1137 class = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS]; 1138 opt->opt_class = nla_get_be16(class); 1139 } 1140 1141 if (tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]) { 1142 type = tb[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE]; 1143 opt->type = nla_get_u8(type); 1144 } 1145 1146 return sizeof(struct geneve_opt) + data_len; 1147 } 1148 1149 static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1150 int depth, int option_len, 1151 struct netlink_ext_ack *extack) 1152 { 1153 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX + 1]; 1154 struct vxlan_metadata *md; 1155 int err; 1156 1157 md = (struct vxlan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1158 memset(md, 0xff, sizeof(*md)); 1159 1160 if (!depth) 1161 return sizeof(*md); 1162 1163 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_VXLAN) { 1164 NL_SET_ERR_MSG(extack, "Non-vxlan option type for mask"); 1165 return -EINVAL; 1166 } 1167 1168 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX, nla, 1169 vxlan_opt_policy, extack); 1170 if (err < 0) 1171 return err; 1172 1173 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { 1174 NL_SET_ERR_MSG(extack, "Missing tunnel key vxlan option gbp"); 1175 return -EINVAL; 1176 } 1177 1178 if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) 1179 md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); 1180 1181 return sizeof(*md); 1182 } 1183 1184 static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, 1185 int depth, int option_len, 1186 struct netlink_ext_ack *extack) 1187 { 1188 struct nlattr *tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX + 1]; 1189 struct erspan_metadata *md; 1190 int err; 1191 1192 md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; 1193 memset(md, 0xff, sizeof(*md)); 1194 md->version = 1; 1195 1196 if (!depth) 1197 return sizeof(*md); 1198 1199 if (nla_type(nla) != TCA_FLOWER_KEY_ENC_OPTS_ERSPAN) { 1200 NL_SET_ERR_MSG(extack, "Non-erspan option type for mask"); 1201 return -EINVAL; 1202 } 1203 1204 err = nla_parse_nested(tb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX, nla, 1205 erspan_opt_policy, extack); 1206 if (err < 0) 1207 return err; 1208 1209 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) { 1210 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option ver"); 1211 return -EINVAL; 1212 } 1213 1214 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]) 1215 md->version = nla_get_u8(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER]); 1216 1217 if (md->version == 1) { 1218 if (!option_len && !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1219 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); 1220 return -EINVAL; 1221 } 1222 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { 1223 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; 1224 md->u.index = nla_get_be32(nla); 1225 } 1226 } else if (md->version == 2) { 1227 if (!option_len && (!tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR] || 1228 !tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID])) { 1229 NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); 1230 return -EINVAL; 1231 } 1232 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { 1233 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; 1234 md->u.md2.dir = nla_get_u8(nla); 1235 } 1236 if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { 1237 nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; 1238 set_hwid(&md->u.md2, nla_get_u8(nla)); 1239 } 1240 } else { 1241 NL_SET_ERR_MSG(extack, "Tunnel key erspan option ver is incorrect"); 1242 return -EINVAL; 1243 } 1244 1245 return sizeof(*md); 1246 } 1247 1248 static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key, 1249 struct fl_flow_key *mask, 1250 struct netlink_ext_ack *extack) 1251 { 1252 const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL; 1253 int err, option_len, key_depth, msk_depth = 0; 1254 1255 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS], 1256 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1257 enc_opts_policy, extack); 1258 if (err) 1259 return err; 1260 1261 nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]); 1262 1263 if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) { 1264 err = nla_validate_nested_deprecated(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK], 1265 TCA_FLOWER_KEY_ENC_OPTS_MAX, 1266 enc_opts_policy, extack); 1267 if (err) 1268 return err; 1269 1270 nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1271 msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]); 1272 } 1273 1274 nla_for_each_attr(nla_opt_key, nla_enc_key, 1275 nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS]), key_depth) { 1276 switch (nla_type(nla_opt_key)) { 1277 case TCA_FLOWER_KEY_ENC_OPTS_GENEVE: 1278 if (key->enc_opts.dst_opt_type && 1279 key->enc_opts.dst_opt_type != TUNNEL_GENEVE_OPT) { 1280 NL_SET_ERR_MSG(extack, "Duplicate type for geneve options"); 1281 return -EINVAL; 1282 } 1283 option_len = 0; 1284 key->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1285 option_len = fl_set_geneve_opt(nla_opt_key, key, 1286 key_depth, option_len, 1287 extack); 1288 if (option_len < 0) 1289 return option_len; 1290 1291 key->enc_opts.len += option_len; 1292 /* At the same time we need to parse through the mask 1293 * in order to verify exact and mask attribute lengths. 1294 */ 1295 mask->enc_opts.dst_opt_type = TUNNEL_GENEVE_OPT; 1296 option_len = fl_set_geneve_opt(nla_opt_msk, mask, 1297 msk_depth, option_len, 1298 extack); 1299 if (option_len < 0) 1300 return option_len; 1301 1302 mask->enc_opts.len += option_len; 1303 if (key->enc_opts.len != mask->enc_opts.len) { 1304 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1305 return -EINVAL; 1306 } 1307 1308 if (msk_depth) 1309 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1310 break; 1311 case TCA_FLOWER_KEY_ENC_OPTS_VXLAN: 1312 if (key->enc_opts.dst_opt_type) { 1313 NL_SET_ERR_MSG(extack, "Duplicate type for vxlan options"); 1314 return -EINVAL; 1315 } 1316 option_len = 0; 1317 key->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1318 option_len = fl_set_vxlan_opt(nla_opt_key, key, 1319 key_depth, option_len, 1320 extack); 1321 if (option_len < 0) 1322 return option_len; 1323 1324 key->enc_opts.len += option_len; 1325 /* At the same time we need to parse through the mask 1326 * in order to verify exact and mask attribute lengths. 1327 */ 1328 mask->enc_opts.dst_opt_type = TUNNEL_VXLAN_OPT; 1329 option_len = fl_set_vxlan_opt(nla_opt_msk, mask, 1330 msk_depth, option_len, 1331 extack); 1332 if (option_len < 0) 1333 return option_len; 1334 1335 mask->enc_opts.len += option_len; 1336 if (key->enc_opts.len != mask->enc_opts.len) { 1337 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1338 return -EINVAL; 1339 } 1340 1341 if (msk_depth) 1342 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1343 break; 1344 case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN: 1345 if (key->enc_opts.dst_opt_type) { 1346 NL_SET_ERR_MSG(extack, "Duplicate type for erspan options"); 1347 return -EINVAL; 1348 } 1349 option_len = 0; 1350 key->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1351 option_len = fl_set_erspan_opt(nla_opt_key, key, 1352 key_depth, option_len, 1353 extack); 1354 if (option_len < 0) 1355 return option_len; 1356 1357 key->enc_opts.len += option_len; 1358 /* At the same time we need to parse through the mask 1359 * in order to verify exact and mask attribute lengths. 1360 */ 1361 mask->enc_opts.dst_opt_type = TUNNEL_ERSPAN_OPT; 1362 option_len = fl_set_erspan_opt(nla_opt_msk, mask, 1363 msk_depth, option_len, 1364 extack); 1365 if (option_len < 0) 1366 return option_len; 1367 1368 mask->enc_opts.len += option_len; 1369 if (key->enc_opts.len != mask->enc_opts.len) { 1370 NL_SET_ERR_MSG(extack, "Key and mask miss aligned"); 1371 return -EINVAL; 1372 } 1373 1374 if (msk_depth) 1375 nla_opt_msk = nla_next(nla_opt_msk, &msk_depth); 1376 break; 1377 default: 1378 NL_SET_ERR_MSG(extack, "Unknown tunnel option type"); 1379 return -EINVAL; 1380 } 1381 } 1382 1383 return 0; 1384 } 1385 1386 static int fl_set_key_ct(struct nlattr **tb, 1387 struct flow_dissector_key_ct *key, 1388 struct flow_dissector_key_ct *mask, 1389 struct netlink_ext_ack *extack) 1390 { 1391 if (tb[TCA_FLOWER_KEY_CT_STATE]) { 1392 if (!IS_ENABLED(CONFIG_NF_CONNTRACK)) { 1393 NL_SET_ERR_MSG(extack, "Conntrack isn't enabled"); 1394 return -EOPNOTSUPP; 1395 } 1396 fl_set_key_val(tb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 1397 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 1398 sizeof(key->ct_state)); 1399 } 1400 if (tb[TCA_FLOWER_KEY_CT_ZONE]) { 1401 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) { 1402 NL_SET_ERR_MSG(extack, "Conntrack zones isn't enabled"); 1403 return -EOPNOTSUPP; 1404 } 1405 fl_set_key_val(tb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 1406 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 1407 sizeof(key->ct_zone)); 1408 } 1409 if (tb[TCA_FLOWER_KEY_CT_MARK]) { 1410 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) { 1411 NL_SET_ERR_MSG(extack, "Conntrack mark isn't enabled"); 1412 return -EOPNOTSUPP; 1413 } 1414 fl_set_key_val(tb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 1415 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 1416 sizeof(key->ct_mark)); 1417 } 1418 if (tb[TCA_FLOWER_KEY_CT_LABELS]) { 1419 if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) { 1420 NL_SET_ERR_MSG(extack, "Conntrack labels aren't enabled"); 1421 return -EOPNOTSUPP; 1422 } 1423 fl_set_key_val(tb, key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 1424 mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 1425 sizeof(key->ct_labels)); 1426 } 1427 1428 return 0; 1429 } 1430 1431 static int fl_set_key(struct net *net, struct nlattr **tb, 1432 struct fl_flow_key *key, struct fl_flow_key *mask, 1433 struct netlink_ext_ack *extack) 1434 { 1435 __be16 ethertype; 1436 int ret = 0; 1437 1438 if (tb[TCA_FLOWER_INDEV]) { 1439 int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV], extack); 1440 if (err < 0) 1441 return err; 1442 key->meta.ingress_ifindex = err; 1443 mask->meta.ingress_ifindex = 0xffffffff; 1444 } 1445 1446 fl_set_key_val(tb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 1447 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 1448 sizeof(key->eth.dst)); 1449 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 1450 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 1451 sizeof(key->eth.src)); 1452 1453 if (tb[TCA_FLOWER_KEY_ETH_TYPE]) { 1454 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]); 1455 1456 if (eth_type_vlan(ethertype)) { 1457 fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID, 1458 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, 1459 &mask->vlan); 1460 1461 if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) { 1462 ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]); 1463 if (eth_type_vlan(ethertype)) { 1464 fl_set_key_vlan(tb, ethertype, 1465 TCA_FLOWER_KEY_CVLAN_ID, 1466 TCA_FLOWER_KEY_CVLAN_PRIO, 1467 &key->cvlan, &mask->cvlan); 1468 fl_set_key_val(tb, &key->basic.n_proto, 1469 TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 1470 &mask->basic.n_proto, 1471 TCA_FLOWER_UNSPEC, 1472 sizeof(key->basic.n_proto)); 1473 } else { 1474 key->basic.n_proto = ethertype; 1475 mask->basic.n_proto = cpu_to_be16(~0); 1476 } 1477 } 1478 } else { 1479 key->basic.n_proto = ethertype; 1480 mask->basic.n_proto = cpu_to_be16(~0); 1481 } 1482 } 1483 1484 if (key->basic.n_proto == htons(ETH_P_IP) || 1485 key->basic.n_proto == htons(ETH_P_IPV6)) { 1486 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 1487 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 1488 sizeof(key->basic.ip_proto)); 1489 fl_set_key_ip(tb, false, &key->ip, &mask->ip); 1490 } 1491 1492 if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) { 1493 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1494 mask->control.addr_type = ~0; 1495 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 1496 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 1497 sizeof(key->ipv4.src)); 1498 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 1499 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 1500 sizeof(key->ipv4.dst)); 1501 } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) { 1502 key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1503 mask->control.addr_type = ~0; 1504 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 1505 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 1506 sizeof(key->ipv6.src)); 1507 fl_set_key_val(tb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 1508 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 1509 sizeof(key->ipv6.dst)); 1510 } 1511 1512 if (key->basic.ip_proto == IPPROTO_TCP) { 1513 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 1514 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 1515 sizeof(key->tp.src)); 1516 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 1517 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 1518 sizeof(key->tp.dst)); 1519 fl_set_key_val(tb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 1520 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 1521 sizeof(key->tcp.flags)); 1522 } else if (key->basic.ip_proto == IPPROTO_UDP) { 1523 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 1524 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 1525 sizeof(key->tp.src)); 1526 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 1527 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 1528 sizeof(key->tp.dst)); 1529 } else if (key->basic.ip_proto == IPPROTO_SCTP) { 1530 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 1531 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 1532 sizeof(key->tp.src)); 1533 fl_set_key_val(tb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 1534 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 1535 sizeof(key->tp.dst)); 1536 } else if (key->basic.n_proto == htons(ETH_P_IP) && 1537 key->basic.ip_proto == IPPROTO_ICMP) { 1538 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV4_TYPE, 1539 &mask->icmp.type, 1540 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 1541 sizeof(key->icmp.type)); 1542 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV4_CODE, 1543 &mask->icmp.code, 1544 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 1545 sizeof(key->icmp.code)); 1546 } else if (key->basic.n_proto == htons(ETH_P_IPV6) && 1547 key->basic.ip_proto == IPPROTO_ICMPV6) { 1548 fl_set_key_val(tb, &key->icmp.type, TCA_FLOWER_KEY_ICMPV6_TYPE, 1549 &mask->icmp.type, 1550 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 1551 sizeof(key->icmp.type)); 1552 fl_set_key_val(tb, &key->icmp.code, TCA_FLOWER_KEY_ICMPV6_CODE, 1553 &mask->icmp.code, 1554 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 1555 sizeof(key->icmp.code)); 1556 } else if (key->basic.n_proto == htons(ETH_P_MPLS_UC) || 1557 key->basic.n_proto == htons(ETH_P_MPLS_MC)) { 1558 ret = fl_set_key_mpls(tb, &key->mpls, &mask->mpls, extack); 1559 if (ret) 1560 return ret; 1561 } else if (key->basic.n_proto == htons(ETH_P_ARP) || 1562 key->basic.n_proto == htons(ETH_P_RARP)) { 1563 fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP, 1564 &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK, 1565 sizeof(key->arp.sip)); 1566 fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP, 1567 &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK, 1568 sizeof(key->arp.tip)); 1569 fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP, 1570 &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK, 1571 sizeof(key->arp.op)); 1572 fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 1573 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 1574 sizeof(key->arp.sha)); 1575 fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 1576 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 1577 sizeof(key->arp.tha)); 1578 } 1579 1580 if (key->basic.ip_proto == IPPROTO_TCP || 1581 key->basic.ip_proto == IPPROTO_UDP || 1582 key->basic.ip_proto == IPPROTO_SCTP) { 1583 ret = fl_set_key_port_range(tb, key, mask, extack); 1584 if (ret) 1585 return ret; 1586 } 1587 1588 if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] || 1589 tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) { 1590 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; 1591 mask->enc_control.addr_type = ~0; 1592 fl_set_key_val(tb, &key->enc_ipv4.src, 1593 TCA_FLOWER_KEY_ENC_IPV4_SRC, 1594 &mask->enc_ipv4.src, 1595 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 1596 sizeof(key->enc_ipv4.src)); 1597 fl_set_key_val(tb, &key->enc_ipv4.dst, 1598 TCA_FLOWER_KEY_ENC_IPV4_DST, 1599 &mask->enc_ipv4.dst, 1600 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 1601 sizeof(key->enc_ipv4.dst)); 1602 } 1603 1604 if (tb[TCA_FLOWER_KEY_ENC_IPV6_SRC] || 1605 tb[TCA_FLOWER_KEY_ENC_IPV6_DST]) { 1606 key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; 1607 mask->enc_control.addr_type = ~0; 1608 fl_set_key_val(tb, &key->enc_ipv6.src, 1609 TCA_FLOWER_KEY_ENC_IPV6_SRC, 1610 &mask->enc_ipv6.src, 1611 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 1612 sizeof(key->enc_ipv6.src)); 1613 fl_set_key_val(tb, &key->enc_ipv6.dst, 1614 TCA_FLOWER_KEY_ENC_IPV6_DST, 1615 &mask->enc_ipv6.dst, 1616 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 1617 sizeof(key->enc_ipv6.dst)); 1618 } 1619 1620 fl_set_key_val(tb, &key->enc_key_id.keyid, TCA_FLOWER_KEY_ENC_KEY_ID, 1621 &mask->enc_key_id.keyid, TCA_FLOWER_UNSPEC, 1622 sizeof(key->enc_key_id.keyid)); 1623 1624 fl_set_key_val(tb, &key->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 1625 &mask->enc_tp.src, TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 1626 sizeof(key->enc_tp.src)); 1627 1628 fl_set_key_val(tb, &key->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 1629 &mask->enc_tp.dst, TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 1630 sizeof(key->enc_tp.dst)); 1631 1632 fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); 1633 1634 fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 1635 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 1636 sizeof(key->hash.hash)); 1637 1638 if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { 1639 ret = fl_set_enc_opt(tb, key, mask, extack); 1640 if (ret) 1641 return ret; 1642 } 1643 1644 ret = fl_set_key_ct(tb, &key->ct, &mask->ct, extack); 1645 if (ret) 1646 return ret; 1647 1648 if (tb[TCA_FLOWER_KEY_FLAGS]) 1649 ret = fl_set_key_flags(tb, &key->control.flags, 1650 &mask->control.flags, extack); 1651 1652 return ret; 1653 } 1654 1655 static void fl_mask_copy(struct fl_flow_mask *dst, 1656 struct fl_flow_mask *src) 1657 { 1658 const void *psrc = fl_key_get_start(&src->key, src); 1659 void *pdst = fl_key_get_start(&dst->key, src); 1660 1661 memcpy(pdst, psrc, fl_mask_range(src)); 1662 dst->range = src->range; 1663 } 1664 1665 static const struct rhashtable_params fl_ht_params = { 1666 .key_offset = offsetof(struct cls_fl_filter, mkey), /* base offset */ 1667 .head_offset = offsetof(struct cls_fl_filter, ht_node), 1668 .automatic_shrinking = true, 1669 }; 1670 1671 static int fl_init_mask_hashtable(struct fl_flow_mask *mask) 1672 { 1673 mask->filter_ht_params = fl_ht_params; 1674 mask->filter_ht_params.key_len = fl_mask_range(mask); 1675 mask->filter_ht_params.key_offset += mask->range.start; 1676 1677 return rhashtable_init(&mask->ht, &mask->filter_ht_params); 1678 } 1679 1680 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 1681 #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 1682 1683 #define FL_KEY_IS_MASKED(mask, member) \ 1684 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \ 1685 0, FL_KEY_MEMBER_SIZE(member)) \ 1686 1687 #define FL_KEY_SET(keys, cnt, id, member) \ 1688 do { \ 1689 keys[cnt].key_id = id; \ 1690 keys[cnt].offset = FL_KEY_MEMBER_OFFSET(member); \ 1691 cnt++; \ 1692 } while(0); 1693 1694 #define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member) \ 1695 do { \ 1696 if (FL_KEY_IS_MASKED(mask, member)) \ 1697 FL_KEY_SET(keys, cnt, id, member); \ 1698 } while(0); 1699 1700 static void fl_init_dissector(struct flow_dissector *dissector, 1701 struct fl_flow_key *mask) 1702 { 1703 struct flow_dissector_key keys[FLOW_DISSECTOR_KEY_MAX]; 1704 size_t cnt = 0; 1705 1706 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1707 FLOW_DISSECTOR_KEY_META, meta); 1708 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control); 1709 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic); 1710 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1711 FLOW_DISSECTOR_KEY_ETH_ADDRS, eth); 1712 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1713 FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); 1714 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1715 FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); 1716 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1717 FLOW_DISSECTOR_KEY_PORTS, tp); 1718 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1719 FLOW_DISSECTOR_KEY_PORTS_RANGE, tp_range); 1720 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1721 FLOW_DISSECTOR_KEY_IP, ip); 1722 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1723 FLOW_DISSECTOR_KEY_TCP, tcp); 1724 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1725 FLOW_DISSECTOR_KEY_ICMP, icmp); 1726 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1727 FLOW_DISSECTOR_KEY_ARP, arp); 1728 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1729 FLOW_DISSECTOR_KEY_MPLS, mpls); 1730 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1731 FLOW_DISSECTOR_KEY_VLAN, vlan); 1732 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1733 FLOW_DISSECTOR_KEY_CVLAN, cvlan); 1734 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1735 FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id); 1736 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1737 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, enc_ipv4); 1738 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1739 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); 1740 if (FL_KEY_IS_MASKED(mask, enc_ipv4) || 1741 FL_KEY_IS_MASKED(mask, enc_ipv6)) 1742 FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, 1743 enc_control); 1744 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1745 FLOW_DISSECTOR_KEY_ENC_PORTS, enc_tp); 1746 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1747 FLOW_DISSECTOR_KEY_ENC_IP, enc_ip); 1748 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1749 FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); 1750 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1751 FLOW_DISSECTOR_KEY_CT, ct); 1752 FL_KEY_SET_IF_MASKED(mask, keys, cnt, 1753 FLOW_DISSECTOR_KEY_HASH, hash); 1754 1755 skb_flow_dissector_init(dissector, keys, cnt); 1756 } 1757 1758 static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head, 1759 struct fl_flow_mask *mask) 1760 { 1761 struct fl_flow_mask *newmask; 1762 int err; 1763 1764 newmask = kzalloc(sizeof(*newmask), GFP_KERNEL); 1765 if (!newmask) 1766 return ERR_PTR(-ENOMEM); 1767 1768 fl_mask_copy(newmask, mask); 1769 1770 if ((newmask->key.tp_range.tp_min.dst && 1771 newmask->key.tp_range.tp_max.dst) || 1772 (newmask->key.tp_range.tp_min.src && 1773 newmask->key.tp_range.tp_max.src)) 1774 newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE; 1775 1776 err = fl_init_mask_hashtable(newmask); 1777 if (err) 1778 goto errout_free; 1779 1780 fl_init_dissector(&newmask->dissector, &newmask->key); 1781 1782 INIT_LIST_HEAD_RCU(&newmask->filters); 1783 1784 refcount_set(&newmask->refcnt, 1); 1785 err = rhashtable_replace_fast(&head->ht, &mask->ht_node, 1786 &newmask->ht_node, mask_ht_params); 1787 if (err) 1788 goto errout_destroy; 1789 1790 spin_lock(&head->masks_lock); 1791 list_add_tail_rcu(&newmask->list, &head->masks); 1792 spin_unlock(&head->masks_lock); 1793 1794 return newmask; 1795 1796 errout_destroy: 1797 rhashtable_destroy(&newmask->ht); 1798 errout_free: 1799 kfree(newmask); 1800 1801 return ERR_PTR(err); 1802 } 1803 1804 static int fl_check_assign_mask(struct cls_fl_head *head, 1805 struct cls_fl_filter *fnew, 1806 struct cls_fl_filter *fold, 1807 struct fl_flow_mask *mask) 1808 { 1809 struct fl_flow_mask *newmask; 1810 int ret = 0; 1811 1812 rcu_read_lock(); 1813 1814 /* Insert mask as temporary node to prevent concurrent creation of mask 1815 * with same key. Any concurrent lookups with same key will return 1816 * -EAGAIN because mask's refcnt is zero. 1817 */ 1818 fnew->mask = rhashtable_lookup_get_insert_fast(&head->ht, 1819 &mask->ht_node, 1820 mask_ht_params); 1821 if (!fnew->mask) { 1822 rcu_read_unlock(); 1823 1824 if (fold) { 1825 ret = -EINVAL; 1826 goto errout_cleanup; 1827 } 1828 1829 newmask = fl_create_new_mask(head, mask); 1830 if (IS_ERR(newmask)) { 1831 ret = PTR_ERR(newmask); 1832 goto errout_cleanup; 1833 } 1834 1835 fnew->mask = newmask; 1836 return 0; 1837 } else if (IS_ERR(fnew->mask)) { 1838 ret = PTR_ERR(fnew->mask); 1839 } else if (fold && fold->mask != fnew->mask) { 1840 ret = -EINVAL; 1841 } else if (!refcount_inc_not_zero(&fnew->mask->refcnt)) { 1842 /* Mask was deleted concurrently, try again */ 1843 ret = -EAGAIN; 1844 } 1845 rcu_read_unlock(); 1846 return ret; 1847 1848 errout_cleanup: 1849 rhashtable_remove_fast(&head->ht, &mask->ht_node, 1850 mask_ht_params); 1851 return ret; 1852 } 1853 1854 static int fl_set_parms(struct net *net, struct tcf_proto *tp, 1855 struct cls_fl_filter *f, struct fl_flow_mask *mask, 1856 unsigned long base, struct nlattr **tb, 1857 struct nlattr *est, bool ovr, 1858 struct fl_flow_tmplt *tmplt, bool rtnl_held, 1859 struct netlink_ext_ack *extack) 1860 { 1861 int err; 1862 1863 err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, rtnl_held, 1864 extack); 1865 if (err < 0) 1866 return err; 1867 1868 if (tb[TCA_FLOWER_CLASSID]) { 1869 f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); 1870 if (!rtnl_held) 1871 rtnl_lock(); 1872 tcf_bind_filter(tp, &f->res, base); 1873 if (!rtnl_held) 1874 rtnl_unlock(); 1875 } 1876 1877 err = fl_set_key(net, tb, &f->key, &mask->key, extack); 1878 if (err) 1879 return err; 1880 1881 fl_mask_update_range(mask); 1882 fl_set_masked_key(&f->mkey, &f->key, mask); 1883 1884 if (!fl_mask_fits_tmplt(tmplt, mask)) { 1885 NL_SET_ERR_MSG_MOD(extack, "Mask does not fit the template"); 1886 return -EINVAL; 1887 } 1888 1889 return 0; 1890 } 1891 1892 static int fl_ht_insert_unique(struct cls_fl_filter *fnew, 1893 struct cls_fl_filter *fold, 1894 bool *in_ht) 1895 { 1896 struct fl_flow_mask *mask = fnew->mask; 1897 int err; 1898 1899 err = rhashtable_lookup_insert_fast(&mask->ht, 1900 &fnew->ht_node, 1901 mask->filter_ht_params); 1902 if (err) { 1903 *in_ht = false; 1904 /* It is okay if filter with same key exists when 1905 * overwriting. 1906 */ 1907 return fold && err == -EEXIST ? 0 : err; 1908 } 1909 1910 *in_ht = true; 1911 return 0; 1912 } 1913 1914 static int fl_change(struct net *net, struct sk_buff *in_skb, 1915 struct tcf_proto *tp, unsigned long base, 1916 u32 handle, struct nlattr **tca, 1917 void **arg, bool ovr, bool rtnl_held, 1918 struct netlink_ext_ack *extack) 1919 { 1920 struct cls_fl_head *head = fl_head_dereference(tp); 1921 struct cls_fl_filter *fold = *arg; 1922 struct cls_fl_filter *fnew; 1923 struct fl_flow_mask *mask; 1924 struct nlattr **tb; 1925 bool in_ht; 1926 int err; 1927 1928 if (!tca[TCA_OPTIONS]) { 1929 err = -EINVAL; 1930 goto errout_fold; 1931 } 1932 1933 mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); 1934 if (!mask) { 1935 err = -ENOBUFS; 1936 goto errout_fold; 1937 } 1938 1939 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 1940 if (!tb) { 1941 err = -ENOBUFS; 1942 goto errout_mask_alloc; 1943 } 1944 1945 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 1946 tca[TCA_OPTIONS], fl_policy, NULL); 1947 if (err < 0) 1948 goto errout_tb; 1949 1950 if (fold && handle && fold->handle != handle) { 1951 err = -EINVAL; 1952 goto errout_tb; 1953 } 1954 1955 fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); 1956 if (!fnew) { 1957 err = -ENOBUFS; 1958 goto errout_tb; 1959 } 1960 INIT_LIST_HEAD(&fnew->hw_list); 1961 refcount_set(&fnew->refcnt, 1); 1962 1963 err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0); 1964 if (err < 0) 1965 goto errout; 1966 1967 if (tb[TCA_FLOWER_FLAGS]) { 1968 fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]); 1969 1970 if (!tc_flags_valid(fnew->flags)) { 1971 err = -EINVAL; 1972 goto errout; 1973 } 1974 } 1975 1976 err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, 1977 tp->chain->tmplt_priv, rtnl_held, extack); 1978 if (err) 1979 goto errout; 1980 1981 err = fl_check_assign_mask(head, fnew, fold, mask); 1982 if (err) 1983 goto errout; 1984 1985 err = fl_ht_insert_unique(fnew, fold, &in_ht); 1986 if (err) 1987 goto errout_mask; 1988 1989 if (!tc_skip_hw(fnew->flags)) { 1990 err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack); 1991 if (err) 1992 goto errout_ht; 1993 } 1994 1995 if (!tc_in_hw(fnew->flags)) 1996 fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW; 1997 1998 spin_lock(&tp->lock); 1999 2000 /* tp was deleted concurrently. -EAGAIN will cause caller to lookup 2001 * proto again or create new one, if necessary. 2002 */ 2003 if (tp->deleting) { 2004 err = -EAGAIN; 2005 goto errout_hw; 2006 } 2007 2008 if (fold) { 2009 /* Fold filter was deleted concurrently. Retry lookup. */ 2010 if (fold->deleted) { 2011 err = -EAGAIN; 2012 goto errout_hw; 2013 } 2014 2015 fnew->handle = handle; 2016 2017 if (!in_ht) { 2018 struct rhashtable_params params = 2019 fnew->mask->filter_ht_params; 2020 2021 err = rhashtable_insert_fast(&fnew->mask->ht, 2022 &fnew->ht_node, 2023 params); 2024 if (err) 2025 goto errout_hw; 2026 in_ht = true; 2027 } 2028 2029 refcount_inc(&fnew->refcnt); 2030 rhashtable_remove_fast(&fold->mask->ht, 2031 &fold->ht_node, 2032 fold->mask->filter_ht_params); 2033 idr_replace(&head->handle_idr, fnew, fnew->handle); 2034 list_replace_rcu(&fold->list, &fnew->list); 2035 fold->deleted = true; 2036 2037 spin_unlock(&tp->lock); 2038 2039 fl_mask_put(head, fold->mask); 2040 if (!tc_skip_hw(fold->flags)) 2041 fl_hw_destroy_filter(tp, fold, rtnl_held, NULL); 2042 tcf_unbind_filter(tp, &fold->res); 2043 /* Caller holds reference to fold, so refcnt is always > 0 2044 * after this. 2045 */ 2046 refcount_dec(&fold->refcnt); 2047 __fl_put(fold); 2048 } else { 2049 if (handle) { 2050 /* user specifies a handle and it doesn't exist */ 2051 err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2052 handle, GFP_ATOMIC); 2053 2054 /* Filter with specified handle was concurrently 2055 * inserted after initial check in cls_api. This is not 2056 * necessarily an error if NLM_F_EXCL is not set in 2057 * message flags. Returning EAGAIN will cause cls_api to 2058 * try to update concurrently inserted rule. 2059 */ 2060 if (err == -ENOSPC) 2061 err = -EAGAIN; 2062 } else { 2063 handle = 1; 2064 err = idr_alloc_u32(&head->handle_idr, fnew, &handle, 2065 INT_MAX, GFP_ATOMIC); 2066 } 2067 if (err) 2068 goto errout_hw; 2069 2070 refcount_inc(&fnew->refcnt); 2071 fnew->handle = handle; 2072 list_add_tail_rcu(&fnew->list, &fnew->mask->filters); 2073 spin_unlock(&tp->lock); 2074 } 2075 2076 *arg = fnew; 2077 2078 kfree(tb); 2079 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2080 return 0; 2081 2082 errout_ht: 2083 spin_lock(&tp->lock); 2084 errout_hw: 2085 fnew->deleted = true; 2086 spin_unlock(&tp->lock); 2087 if (!tc_skip_hw(fnew->flags)) 2088 fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL); 2089 if (in_ht) 2090 rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, 2091 fnew->mask->filter_ht_params); 2092 errout_mask: 2093 fl_mask_put(head, fnew->mask); 2094 errout: 2095 __fl_put(fnew); 2096 errout_tb: 2097 kfree(tb); 2098 errout_mask_alloc: 2099 tcf_queue_work(&mask->rwork, fl_uninit_mask_free_work); 2100 errout_fold: 2101 if (fold) 2102 __fl_put(fold); 2103 return err; 2104 } 2105 2106 static int fl_delete(struct tcf_proto *tp, void *arg, bool *last, 2107 bool rtnl_held, struct netlink_ext_ack *extack) 2108 { 2109 struct cls_fl_head *head = fl_head_dereference(tp); 2110 struct cls_fl_filter *f = arg; 2111 bool last_on_mask; 2112 int err = 0; 2113 2114 err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack); 2115 *last = list_empty(&head->masks); 2116 __fl_put(f); 2117 2118 return err; 2119 } 2120 2121 static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg, 2122 bool rtnl_held) 2123 { 2124 struct cls_fl_head *head = fl_head_dereference(tp); 2125 unsigned long id = arg->cookie, tmp; 2126 struct cls_fl_filter *f; 2127 2128 arg->count = arg->skip; 2129 2130 idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) { 2131 /* don't return filters that are being deleted */ 2132 if (!refcount_inc_not_zero(&f->refcnt)) 2133 continue; 2134 if (arg->fn(tp, f, arg) < 0) { 2135 __fl_put(f); 2136 arg->stop = 1; 2137 break; 2138 } 2139 __fl_put(f); 2140 arg->count++; 2141 } 2142 arg->cookie = id; 2143 } 2144 2145 static struct cls_fl_filter * 2146 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) 2147 { 2148 struct cls_fl_head *head = fl_head_dereference(tp); 2149 2150 spin_lock(&tp->lock); 2151 if (list_empty(&head->hw_filters)) { 2152 spin_unlock(&tp->lock); 2153 return NULL; 2154 } 2155 2156 if (!f) 2157 f = list_entry(&head->hw_filters, struct cls_fl_filter, 2158 hw_list); 2159 list_for_each_entry_continue(f, &head->hw_filters, hw_list) { 2160 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { 2161 spin_unlock(&tp->lock); 2162 return f; 2163 } 2164 } 2165 2166 spin_unlock(&tp->lock); 2167 return NULL; 2168 } 2169 2170 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, 2171 void *cb_priv, struct netlink_ext_ack *extack) 2172 { 2173 struct tcf_block *block = tp->chain->block; 2174 struct flow_cls_offload cls_flower = {}; 2175 struct cls_fl_filter *f = NULL; 2176 int err; 2177 2178 /* hw_filters list can only be changed by hw offload functions after 2179 * obtaining rtnl lock. Make sure it is not changed while reoffload is 2180 * iterating it. 2181 */ 2182 ASSERT_RTNL(); 2183 2184 while ((f = fl_get_next_hw_filter(tp, f, add))) { 2185 cls_flower.rule = 2186 flow_rule_alloc(tcf_exts_num_actions(&f->exts)); 2187 if (!cls_flower.rule) { 2188 __fl_put(f); 2189 return -ENOMEM; 2190 } 2191 2192 tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, 2193 extack); 2194 cls_flower.command = add ? 2195 FLOW_CLS_REPLACE : FLOW_CLS_DESTROY; 2196 cls_flower.cookie = (unsigned long)f; 2197 cls_flower.rule->match.dissector = &f->mask->dissector; 2198 cls_flower.rule->match.mask = &f->mask->key; 2199 cls_flower.rule->match.key = &f->mkey; 2200 2201 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); 2202 if (err) { 2203 kfree(cls_flower.rule); 2204 if (tc_skip_sw(f->flags)) { 2205 NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action"); 2206 __fl_put(f); 2207 return err; 2208 } 2209 goto next_flow; 2210 } 2211 2212 cls_flower.classid = f->res.classid; 2213 2214 err = tc_setup_cb_reoffload(block, tp, add, cb, 2215 TC_SETUP_CLSFLOWER, &cls_flower, 2216 cb_priv, &f->flags, 2217 &f->in_hw_count); 2218 tc_cleanup_flow_action(&cls_flower.rule->action); 2219 kfree(cls_flower.rule); 2220 2221 if (err) { 2222 __fl_put(f); 2223 return err; 2224 } 2225 next_flow: 2226 __fl_put(f); 2227 } 2228 2229 return 0; 2230 } 2231 2232 static void fl_hw_add(struct tcf_proto *tp, void *type_data) 2233 { 2234 struct flow_cls_offload *cls_flower = type_data; 2235 struct cls_fl_filter *f = 2236 (struct cls_fl_filter *) cls_flower->cookie; 2237 struct cls_fl_head *head = fl_head_dereference(tp); 2238 2239 spin_lock(&tp->lock); 2240 list_add(&f->hw_list, &head->hw_filters); 2241 spin_unlock(&tp->lock); 2242 } 2243 2244 static void fl_hw_del(struct tcf_proto *tp, void *type_data) 2245 { 2246 struct flow_cls_offload *cls_flower = type_data; 2247 struct cls_fl_filter *f = 2248 (struct cls_fl_filter *) cls_flower->cookie; 2249 2250 spin_lock(&tp->lock); 2251 if (!list_empty(&f->hw_list)) 2252 list_del_init(&f->hw_list); 2253 spin_unlock(&tp->lock); 2254 } 2255 2256 static int fl_hw_create_tmplt(struct tcf_chain *chain, 2257 struct fl_flow_tmplt *tmplt) 2258 { 2259 struct flow_cls_offload cls_flower = {}; 2260 struct tcf_block *block = chain->block; 2261 2262 cls_flower.rule = flow_rule_alloc(0); 2263 if (!cls_flower.rule) 2264 return -ENOMEM; 2265 2266 cls_flower.common.chain_index = chain->index; 2267 cls_flower.command = FLOW_CLS_TMPLT_CREATE; 2268 cls_flower.cookie = (unsigned long) tmplt; 2269 cls_flower.rule->match.dissector = &tmplt->dissector; 2270 cls_flower.rule->match.mask = &tmplt->mask; 2271 cls_flower.rule->match.key = &tmplt->dummy_key; 2272 2273 /* We don't care if driver (any of them) fails to handle this 2274 * call. It serves just as a hint for it. 2275 */ 2276 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2277 kfree(cls_flower.rule); 2278 2279 return 0; 2280 } 2281 2282 static void fl_hw_destroy_tmplt(struct tcf_chain *chain, 2283 struct fl_flow_tmplt *tmplt) 2284 { 2285 struct flow_cls_offload cls_flower = {}; 2286 struct tcf_block *block = chain->block; 2287 2288 cls_flower.common.chain_index = chain->index; 2289 cls_flower.command = FLOW_CLS_TMPLT_DESTROY; 2290 cls_flower.cookie = (unsigned long) tmplt; 2291 2292 tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false, true); 2293 } 2294 2295 static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, 2296 struct nlattr **tca, 2297 struct netlink_ext_ack *extack) 2298 { 2299 struct fl_flow_tmplt *tmplt; 2300 struct nlattr **tb; 2301 int err; 2302 2303 if (!tca[TCA_OPTIONS]) 2304 return ERR_PTR(-EINVAL); 2305 2306 tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); 2307 if (!tb) 2308 return ERR_PTR(-ENOBUFS); 2309 err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, 2310 tca[TCA_OPTIONS], fl_policy, NULL); 2311 if (err) 2312 goto errout_tb; 2313 2314 tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL); 2315 if (!tmplt) { 2316 err = -ENOMEM; 2317 goto errout_tb; 2318 } 2319 tmplt->chain = chain; 2320 err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); 2321 if (err) 2322 goto errout_tmplt; 2323 2324 fl_init_dissector(&tmplt->dissector, &tmplt->mask); 2325 2326 err = fl_hw_create_tmplt(chain, tmplt); 2327 if (err) 2328 goto errout_tmplt; 2329 2330 kfree(tb); 2331 return tmplt; 2332 2333 errout_tmplt: 2334 kfree(tmplt); 2335 errout_tb: 2336 kfree(tb); 2337 return ERR_PTR(err); 2338 } 2339 2340 static void fl_tmplt_destroy(void *tmplt_priv) 2341 { 2342 struct fl_flow_tmplt *tmplt = tmplt_priv; 2343 2344 fl_hw_destroy_tmplt(tmplt->chain, tmplt); 2345 kfree(tmplt); 2346 } 2347 2348 static int fl_dump_key_val(struct sk_buff *skb, 2349 void *val, int val_type, 2350 void *mask, int mask_type, int len) 2351 { 2352 int err; 2353 2354 if (!memchr_inv(mask, 0, len)) 2355 return 0; 2356 err = nla_put(skb, val_type, len, val); 2357 if (err) 2358 return err; 2359 if (mask_type != TCA_FLOWER_UNSPEC) { 2360 err = nla_put(skb, mask_type, len, mask); 2361 if (err) 2362 return err; 2363 } 2364 return 0; 2365 } 2366 2367 static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key, 2368 struct fl_flow_key *mask) 2369 { 2370 if (fl_dump_key_val(skb, &key->tp_range.tp_min.dst, 2371 TCA_FLOWER_KEY_PORT_DST_MIN, 2372 &mask->tp_range.tp_min.dst, TCA_FLOWER_UNSPEC, 2373 sizeof(key->tp_range.tp_min.dst)) || 2374 fl_dump_key_val(skb, &key->tp_range.tp_max.dst, 2375 TCA_FLOWER_KEY_PORT_DST_MAX, 2376 &mask->tp_range.tp_max.dst, TCA_FLOWER_UNSPEC, 2377 sizeof(key->tp_range.tp_max.dst)) || 2378 fl_dump_key_val(skb, &key->tp_range.tp_min.src, 2379 TCA_FLOWER_KEY_PORT_SRC_MIN, 2380 &mask->tp_range.tp_min.src, TCA_FLOWER_UNSPEC, 2381 sizeof(key->tp_range.tp_min.src)) || 2382 fl_dump_key_val(skb, &key->tp_range.tp_max.src, 2383 TCA_FLOWER_KEY_PORT_SRC_MAX, 2384 &mask->tp_range.tp_max.src, TCA_FLOWER_UNSPEC, 2385 sizeof(key->tp_range.tp_max.src))) 2386 return -1; 2387 2388 return 0; 2389 } 2390 2391 static int fl_dump_key_mpls_opt_lse(struct sk_buff *skb, 2392 struct flow_dissector_key_mpls *mpls_key, 2393 struct flow_dissector_key_mpls *mpls_mask, 2394 u8 lse_index) 2395 { 2396 struct flow_dissector_mpls_lse *lse_mask = &mpls_mask->ls[lse_index]; 2397 struct flow_dissector_mpls_lse *lse_key = &mpls_key->ls[lse_index]; 2398 int err; 2399 2400 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, 2401 lse_index + 1); 2402 if (err) 2403 return err; 2404 2405 if (lse_mask->mpls_ttl) { 2406 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, 2407 lse_key->mpls_ttl); 2408 if (err) 2409 return err; 2410 } 2411 if (lse_mask->mpls_bos) { 2412 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, 2413 lse_key->mpls_bos); 2414 if (err) 2415 return err; 2416 } 2417 if (lse_mask->mpls_tc) { 2418 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, 2419 lse_key->mpls_tc); 2420 if (err) 2421 return err; 2422 } 2423 if (lse_mask->mpls_label) { 2424 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, 2425 lse_key->mpls_label); 2426 if (err) 2427 return err; 2428 } 2429 2430 return 0; 2431 } 2432 2433 static int fl_dump_key_mpls_opts(struct sk_buff *skb, 2434 struct flow_dissector_key_mpls *mpls_key, 2435 struct flow_dissector_key_mpls *mpls_mask) 2436 { 2437 struct nlattr *opts; 2438 struct nlattr *lse; 2439 u8 lse_index; 2440 int err; 2441 2442 opts = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS); 2443 if (!opts) 2444 return -EMSGSIZE; 2445 2446 for (lse_index = 0; lse_index < FLOW_DIS_MPLS_MAX; lse_index++) { 2447 if (!(mpls_mask->used_lses & 1 << lse_index)) 2448 continue; 2449 2450 lse = nla_nest_start(skb, TCA_FLOWER_KEY_MPLS_OPTS_LSE); 2451 if (!lse) { 2452 err = -EMSGSIZE; 2453 goto err_opts; 2454 } 2455 2456 err = fl_dump_key_mpls_opt_lse(skb, mpls_key, mpls_mask, 2457 lse_index); 2458 if (err) 2459 goto err_opts_lse; 2460 nla_nest_end(skb, lse); 2461 } 2462 nla_nest_end(skb, opts); 2463 2464 return 0; 2465 2466 err_opts_lse: 2467 nla_nest_cancel(skb, lse); 2468 err_opts: 2469 nla_nest_cancel(skb, opts); 2470 2471 return err; 2472 } 2473 2474 static int fl_dump_key_mpls(struct sk_buff *skb, 2475 struct flow_dissector_key_mpls *mpls_key, 2476 struct flow_dissector_key_mpls *mpls_mask) 2477 { 2478 struct flow_dissector_mpls_lse *lse_mask; 2479 struct flow_dissector_mpls_lse *lse_key; 2480 int err; 2481 2482 if (!mpls_mask->used_lses) 2483 return 0; 2484 2485 lse_mask = &mpls_mask->ls[0]; 2486 lse_key = &mpls_key->ls[0]; 2487 2488 /* For backward compatibility, don't use the MPLS nested attributes if 2489 * the rule can be expressed using the old attributes. 2490 */ 2491 if (mpls_mask->used_lses & ~1 || 2492 (!lse_mask->mpls_ttl && !lse_mask->mpls_bos && 2493 !lse_mask->mpls_tc && !lse_mask->mpls_label)) 2494 return fl_dump_key_mpls_opts(skb, mpls_key, mpls_mask); 2495 2496 if (lse_mask->mpls_ttl) { 2497 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TTL, 2498 lse_key->mpls_ttl); 2499 if (err) 2500 return err; 2501 } 2502 if (lse_mask->mpls_tc) { 2503 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_TC, 2504 lse_key->mpls_tc); 2505 if (err) 2506 return err; 2507 } 2508 if (lse_mask->mpls_label) { 2509 err = nla_put_u32(skb, TCA_FLOWER_KEY_MPLS_LABEL, 2510 lse_key->mpls_label); 2511 if (err) 2512 return err; 2513 } 2514 if (lse_mask->mpls_bos) { 2515 err = nla_put_u8(skb, TCA_FLOWER_KEY_MPLS_BOS, 2516 lse_key->mpls_bos); 2517 if (err) 2518 return err; 2519 } 2520 return 0; 2521 } 2522 2523 static int fl_dump_key_ip(struct sk_buff *skb, bool encap, 2524 struct flow_dissector_key_ip *key, 2525 struct flow_dissector_key_ip *mask) 2526 { 2527 int tos_key = encap ? TCA_FLOWER_KEY_ENC_IP_TOS : TCA_FLOWER_KEY_IP_TOS; 2528 int ttl_key = encap ? TCA_FLOWER_KEY_ENC_IP_TTL : TCA_FLOWER_KEY_IP_TTL; 2529 int tos_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TOS_MASK : TCA_FLOWER_KEY_IP_TOS_MASK; 2530 int ttl_mask = encap ? TCA_FLOWER_KEY_ENC_IP_TTL_MASK : TCA_FLOWER_KEY_IP_TTL_MASK; 2531 2532 if (fl_dump_key_val(skb, &key->tos, tos_key, &mask->tos, tos_mask, sizeof(key->tos)) || 2533 fl_dump_key_val(skb, &key->ttl, ttl_key, &mask->ttl, ttl_mask, sizeof(key->ttl))) 2534 return -1; 2535 2536 return 0; 2537 } 2538 2539 static int fl_dump_key_vlan(struct sk_buff *skb, 2540 int vlan_id_key, int vlan_prio_key, 2541 struct flow_dissector_key_vlan *vlan_key, 2542 struct flow_dissector_key_vlan *vlan_mask) 2543 { 2544 int err; 2545 2546 if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask))) 2547 return 0; 2548 if (vlan_mask->vlan_id) { 2549 err = nla_put_u16(skb, vlan_id_key, 2550 vlan_key->vlan_id); 2551 if (err) 2552 return err; 2553 } 2554 if (vlan_mask->vlan_priority) { 2555 err = nla_put_u8(skb, vlan_prio_key, 2556 vlan_key->vlan_priority); 2557 if (err) 2558 return err; 2559 } 2560 return 0; 2561 } 2562 2563 static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, 2564 u32 *flower_key, u32 *flower_mask, 2565 u32 flower_flag_bit, u32 dissector_flag_bit) 2566 { 2567 if (dissector_mask & dissector_flag_bit) { 2568 *flower_mask |= flower_flag_bit; 2569 if (dissector_key & dissector_flag_bit) 2570 *flower_key |= flower_flag_bit; 2571 } 2572 } 2573 2574 static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) 2575 { 2576 u32 key, mask; 2577 __be32 _key, _mask; 2578 int err; 2579 2580 if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) 2581 return 0; 2582 2583 key = 0; 2584 mask = 0; 2585 2586 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2587 TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, FLOW_DIS_IS_FRAGMENT); 2588 fl_get_key_flag(flags_key, flags_mask, &key, &mask, 2589 TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, 2590 FLOW_DIS_FIRST_FRAG); 2591 2592 _key = cpu_to_be32(key); 2593 _mask = cpu_to_be32(mask); 2594 2595 err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); 2596 if (err) 2597 return err; 2598 2599 return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); 2600 } 2601 2602 static int fl_dump_key_geneve_opt(struct sk_buff *skb, 2603 struct flow_dissector_key_enc_opts *enc_opts) 2604 { 2605 struct geneve_opt *opt; 2606 struct nlattr *nest; 2607 int opt_off = 0; 2608 2609 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_GENEVE); 2610 if (!nest) 2611 goto nla_put_failure; 2612 2613 while (enc_opts->len > opt_off) { 2614 opt = (struct geneve_opt *)&enc_opts->data[opt_off]; 2615 2616 if (nla_put_be16(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, 2617 opt->opt_class)) 2618 goto nla_put_failure; 2619 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, 2620 opt->type)) 2621 goto nla_put_failure; 2622 if (nla_put(skb, TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, 2623 opt->length * 4, opt->opt_data)) 2624 goto nla_put_failure; 2625 2626 opt_off += sizeof(struct geneve_opt) + opt->length * 4; 2627 } 2628 nla_nest_end(skb, nest); 2629 return 0; 2630 2631 nla_put_failure: 2632 nla_nest_cancel(skb, nest); 2633 return -EMSGSIZE; 2634 } 2635 2636 static int fl_dump_key_vxlan_opt(struct sk_buff *skb, 2637 struct flow_dissector_key_enc_opts *enc_opts) 2638 { 2639 struct vxlan_metadata *md; 2640 struct nlattr *nest; 2641 2642 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_VXLAN); 2643 if (!nest) 2644 goto nla_put_failure; 2645 2646 md = (struct vxlan_metadata *)&enc_opts->data[0]; 2647 if (nla_put_u32(skb, TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, md->gbp)) 2648 goto nla_put_failure; 2649 2650 nla_nest_end(skb, nest); 2651 return 0; 2652 2653 nla_put_failure: 2654 nla_nest_cancel(skb, nest); 2655 return -EMSGSIZE; 2656 } 2657 2658 static int fl_dump_key_erspan_opt(struct sk_buff *skb, 2659 struct flow_dissector_key_enc_opts *enc_opts) 2660 { 2661 struct erspan_metadata *md; 2662 struct nlattr *nest; 2663 2664 nest = nla_nest_start_noflag(skb, TCA_FLOWER_KEY_ENC_OPTS_ERSPAN); 2665 if (!nest) 2666 goto nla_put_failure; 2667 2668 md = (struct erspan_metadata *)&enc_opts->data[0]; 2669 if (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, md->version)) 2670 goto nla_put_failure; 2671 2672 if (md->version == 1 && 2673 nla_put_be32(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, md->u.index)) 2674 goto nla_put_failure; 2675 2676 if (md->version == 2 && 2677 (nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, 2678 md->u.md2.dir) || 2679 nla_put_u8(skb, TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, 2680 get_hwid(&md->u.md2)))) 2681 goto nla_put_failure; 2682 2683 nla_nest_end(skb, nest); 2684 return 0; 2685 2686 nla_put_failure: 2687 nla_nest_cancel(skb, nest); 2688 return -EMSGSIZE; 2689 } 2690 2691 static int fl_dump_key_ct(struct sk_buff *skb, 2692 struct flow_dissector_key_ct *key, 2693 struct flow_dissector_key_ct *mask) 2694 { 2695 if (IS_ENABLED(CONFIG_NF_CONNTRACK) && 2696 fl_dump_key_val(skb, &key->ct_state, TCA_FLOWER_KEY_CT_STATE, 2697 &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK, 2698 sizeof(key->ct_state))) 2699 goto nla_put_failure; 2700 2701 if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) && 2702 fl_dump_key_val(skb, &key->ct_zone, TCA_FLOWER_KEY_CT_ZONE, 2703 &mask->ct_zone, TCA_FLOWER_KEY_CT_ZONE_MASK, 2704 sizeof(key->ct_zone))) 2705 goto nla_put_failure; 2706 2707 if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) && 2708 fl_dump_key_val(skb, &key->ct_mark, TCA_FLOWER_KEY_CT_MARK, 2709 &mask->ct_mark, TCA_FLOWER_KEY_CT_MARK_MASK, 2710 sizeof(key->ct_mark))) 2711 goto nla_put_failure; 2712 2713 if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) && 2714 fl_dump_key_val(skb, &key->ct_labels, TCA_FLOWER_KEY_CT_LABELS, 2715 &mask->ct_labels, TCA_FLOWER_KEY_CT_LABELS_MASK, 2716 sizeof(key->ct_labels))) 2717 goto nla_put_failure; 2718 2719 return 0; 2720 2721 nla_put_failure: 2722 return -EMSGSIZE; 2723 } 2724 2725 static int fl_dump_key_options(struct sk_buff *skb, int enc_opt_type, 2726 struct flow_dissector_key_enc_opts *enc_opts) 2727 { 2728 struct nlattr *nest; 2729 int err; 2730 2731 if (!enc_opts->len) 2732 return 0; 2733 2734 nest = nla_nest_start_noflag(skb, enc_opt_type); 2735 if (!nest) 2736 goto nla_put_failure; 2737 2738 switch (enc_opts->dst_opt_type) { 2739 case TUNNEL_GENEVE_OPT: 2740 err = fl_dump_key_geneve_opt(skb, enc_opts); 2741 if (err) 2742 goto nla_put_failure; 2743 break; 2744 case TUNNEL_VXLAN_OPT: 2745 err = fl_dump_key_vxlan_opt(skb, enc_opts); 2746 if (err) 2747 goto nla_put_failure; 2748 break; 2749 case TUNNEL_ERSPAN_OPT: 2750 err = fl_dump_key_erspan_opt(skb, enc_opts); 2751 if (err) 2752 goto nla_put_failure; 2753 break; 2754 default: 2755 goto nla_put_failure; 2756 } 2757 nla_nest_end(skb, nest); 2758 return 0; 2759 2760 nla_put_failure: 2761 nla_nest_cancel(skb, nest); 2762 return -EMSGSIZE; 2763 } 2764 2765 static int fl_dump_key_enc_opt(struct sk_buff *skb, 2766 struct flow_dissector_key_enc_opts *key_opts, 2767 struct flow_dissector_key_enc_opts *msk_opts) 2768 { 2769 int err; 2770 2771 err = fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS, key_opts); 2772 if (err) 2773 return err; 2774 2775 return fl_dump_key_options(skb, TCA_FLOWER_KEY_ENC_OPTS_MASK, msk_opts); 2776 } 2777 2778 static int fl_dump_key(struct sk_buff *skb, struct net *net, 2779 struct fl_flow_key *key, struct fl_flow_key *mask) 2780 { 2781 if (mask->meta.ingress_ifindex) { 2782 struct net_device *dev; 2783 2784 dev = __dev_get_by_index(net, key->meta.ingress_ifindex); 2785 if (dev && nla_put_string(skb, TCA_FLOWER_INDEV, dev->name)) 2786 goto nla_put_failure; 2787 } 2788 2789 if (fl_dump_key_val(skb, key->eth.dst, TCA_FLOWER_KEY_ETH_DST, 2790 mask->eth.dst, TCA_FLOWER_KEY_ETH_DST_MASK, 2791 sizeof(key->eth.dst)) || 2792 fl_dump_key_val(skb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC, 2793 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK, 2794 sizeof(key->eth.src)) || 2795 fl_dump_key_val(skb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE, 2796 &mask->basic.n_proto, TCA_FLOWER_UNSPEC, 2797 sizeof(key->basic.n_proto))) 2798 goto nla_put_failure; 2799 2800 if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls)) 2801 goto nla_put_failure; 2802 2803 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_VLAN_ID, 2804 TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan, &mask->vlan)) 2805 goto nla_put_failure; 2806 2807 if (fl_dump_key_vlan(skb, TCA_FLOWER_KEY_CVLAN_ID, 2808 TCA_FLOWER_KEY_CVLAN_PRIO, 2809 &key->cvlan, &mask->cvlan) || 2810 (mask->cvlan.vlan_tpid && 2811 nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 2812 key->cvlan.vlan_tpid))) 2813 goto nla_put_failure; 2814 2815 if (mask->basic.n_proto) { 2816 if (mask->cvlan.vlan_tpid) { 2817 if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE, 2818 key->basic.n_proto)) 2819 goto nla_put_failure; 2820 } else if (mask->vlan.vlan_tpid) { 2821 if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE, 2822 key->basic.n_proto)) 2823 goto nla_put_failure; 2824 } 2825 } 2826 2827 if ((key->basic.n_proto == htons(ETH_P_IP) || 2828 key->basic.n_proto == htons(ETH_P_IPV6)) && 2829 (fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO, 2830 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC, 2831 sizeof(key->basic.ip_proto)) || 2832 fl_dump_key_ip(skb, false, &key->ip, &mask->ip))) 2833 goto nla_put_failure; 2834 2835 if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 2836 (fl_dump_key_val(skb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC, 2837 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK, 2838 sizeof(key->ipv4.src)) || 2839 fl_dump_key_val(skb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST, 2840 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK, 2841 sizeof(key->ipv4.dst)))) 2842 goto nla_put_failure; 2843 else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 2844 (fl_dump_key_val(skb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC, 2845 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK, 2846 sizeof(key->ipv6.src)) || 2847 fl_dump_key_val(skb, &key->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST, 2848 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK, 2849 sizeof(key->ipv6.dst)))) 2850 goto nla_put_failure; 2851 2852 if (key->basic.ip_proto == IPPROTO_TCP && 2853 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC, 2854 &mask->tp.src, TCA_FLOWER_KEY_TCP_SRC_MASK, 2855 sizeof(key->tp.src)) || 2856 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_TCP_DST, 2857 &mask->tp.dst, TCA_FLOWER_KEY_TCP_DST_MASK, 2858 sizeof(key->tp.dst)) || 2859 fl_dump_key_val(skb, &key->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS, 2860 &mask->tcp.flags, TCA_FLOWER_KEY_TCP_FLAGS_MASK, 2861 sizeof(key->tcp.flags)))) 2862 goto nla_put_failure; 2863 else if (key->basic.ip_proto == IPPROTO_UDP && 2864 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_UDP_SRC, 2865 &mask->tp.src, TCA_FLOWER_KEY_UDP_SRC_MASK, 2866 sizeof(key->tp.src)) || 2867 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_UDP_DST, 2868 &mask->tp.dst, TCA_FLOWER_KEY_UDP_DST_MASK, 2869 sizeof(key->tp.dst)))) 2870 goto nla_put_failure; 2871 else if (key->basic.ip_proto == IPPROTO_SCTP && 2872 (fl_dump_key_val(skb, &key->tp.src, TCA_FLOWER_KEY_SCTP_SRC, 2873 &mask->tp.src, TCA_FLOWER_KEY_SCTP_SRC_MASK, 2874 sizeof(key->tp.src)) || 2875 fl_dump_key_val(skb, &key->tp.dst, TCA_FLOWER_KEY_SCTP_DST, 2876 &mask->tp.dst, TCA_FLOWER_KEY_SCTP_DST_MASK, 2877 sizeof(key->tp.dst)))) 2878 goto nla_put_failure; 2879 else if (key->basic.n_proto == htons(ETH_P_IP) && 2880 key->basic.ip_proto == IPPROTO_ICMP && 2881 (fl_dump_key_val(skb, &key->icmp.type, 2882 TCA_FLOWER_KEY_ICMPV4_TYPE, &mask->icmp.type, 2883 TCA_FLOWER_KEY_ICMPV4_TYPE_MASK, 2884 sizeof(key->icmp.type)) || 2885 fl_dump_key_val(skb, &key->icmp.code, 2886 TCA_FLOWER_KEY_ICMPV4_CODE, &mask->icmp.code, 2887 TCA_FLOWER_KEY_ICMPV4_CODE_MASK, 2888 sizeof(key->icmp.code)))) 2889 goto nla_put_failure; 2890 else if (key->basic.n_proto == htons(ETH_P_IPV6) && 2891 key->basic.ip_proto == IPPROTO_ICMPV6 && 2892 (fl_dump_key_val(skb, &key->icmp.type, 2893 TCA_FLOWER_KEY_ICMPV6_TYPE, &mask->icmp.type, 2894 TCA_FLOWER_KEY_ICMPV6_TYPE_MASK, 2895 sizeof(key->icmp.type)) || 2896 fl_dump_key_val(skb, &key->icmp.code, 2897 TCA_FLOWER_KEY_ICMPV6_CODE, &mask->icmp.code, 2898 TCA_FLOWER_KEY_ICMPV6_CODE_MASK, 2899 sizeof(key->icmp.code)))) 2900 goto nla_put_failure; 2901 else if ((key->basic.n_proto == htons(ETH_P_ARP) || 2902 key->basic.n_proto == htons(ETH_P_RARP)) && 2903 (fl_dump_key_val(skb, &key->arp.sip, 2904 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip, 2905 TCA_FLOWER_KEY_ARP_SIP_MASK, 2906 sizeof(key->arp.sip)) || 2907 fl_dump_key_val(skb, &key->arp.tip, 2908 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip, 2909 TCA_FLOWER_KEY_ARP_TIP_MASK, 2910 sizeof(key->arp.tip)) || 2911 fl_dump_key_val(skb, &key->arp.op, 2912 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op, 2913 TCA_FLOWER_KEY_ARP_OP_MASK, 2914 sizeof(key->arp.op)) || 2915 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA, 2916 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK, 2917 sizeof(key->arp.sha)) || 2918 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA, 2919 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK, 2920 sizeof(key->arp.tha)))) 2921 goto nla_put_failure; 2922 2923 if ((key->basic.ip_proto == IPPROTO_TCP || 2924 key->basic.ip_proto == IPPROTO_UDP || 2925 key->basic.ip_proto == IPPROTO_SCTP) && 2926 fl_dump_key_port_range(skb, key, mask)) 2927 goto nla_put_failure; 2928 2929 if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS && 2930 (fl_dump_key_val(skb, &key->enc_ipv4.src, 2931 TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src, 2932 TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK, 2933 sizeof(key->enc_ipv4.src)) || 2934 fl_dump_key_val(skb, &key->enc_ipv4.dst, 2935 TCA_FLOWER_KEY_ENC_IPV4_DST, &mask->enc_ipv4.dst, 2936 TCA_FLOWER_KEY_ENC_IPV4_DST_MASK, 2937 sizeof(key->enc_ipv4.dst)))) 2938 goto nla_put_failure; 2939 else if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS && 2940 (fl_dump_key_val(skb, &key->enc_ipv6.src, 2941 TCA_FLOWER_KEY_ENC_IPV6_SRC, &mask->enc_ipv6.src, 2942 TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK, 2943 sizeof(key->enc_ipv6.src)) || 2944 fl_dump_key_val(skb, &key->enc_ipv6.dst, 2945 TCA_FLOWER_KEY_ENC_IPV6_DST, 2946 &mask->enc_ipv6.dst, 2947 TCA_FLOWER_KEY_ENC_IPV6_DST_MASK, 2948 sizeof(key->enc_ipv6.dst)))) 2949 goto nla_put_failure; 2950 2951 if (fl_dump_key_val(skb, &key->enc_key_id, TCA_FLOWER_KEY_ENC_KEY_ID, 2952 &mask->enc_key_id, TCA_FLOWER_UNSPEC, 2953 sizeof(key->enc_key_id)) || 2954 fl_dump_key_val(skb, &key->enc_tp.src, 2955 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, 2956 &mask->enc_tp.src, 2957 TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, 2958 sizeof(key->enc_tp.src)) || 2959 fl_dump_key_val(skb, &key->enc_tp.dst, 2960 TCA_FLOWER_KEY_ENC_UDP_DST_PORT, 2961 &mask->enc_tp.dst, 2962 TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, 2963 sizeof(key->enc_tp.dst)) || 2964 fl_dump_key_ip(skb, true, &key->enc_ip, &mask->enc_ip) || 2965 fl_dump_key_enc_opt(skb, &key->enc_opts, &mask->enc_opts)) 2966 goto nla_put_failure; 2967 2968 if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) 2969 goto nla_put_failure; 2970 2971 if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) 2972 goto nla_put_failure; 2973 2974 if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, 2975 &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, 2976 sizeof(key->hash.hash))) 2977 goto nla_put_failure; 2978 2979 return 0; 2980 2981 nla_put_failure: 2982 return -EMSGSIZE; 2983 } 2984 2985 static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, 2986 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 2987 { 2988 struct cls_fl_filter *f = fh; 2989 struct nlattr *nest; 2990 struct fl_flow_key *key, *mask; 2991 bool skip_hw; 2992 2993 if (!f) 2994 return skb->len; 2995 2996 t->tcm_handle = f->handle; 2997 2998 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 2999 if (!nest) 3000 goto nla_put_failure; 3001 3002 spin_lock(&tp->lock); 3003 3004 if (f->res.classid && 3005 nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid)) 3006 goto nla_put_failure_locked; 3007 3008 key = &f->key; 3009 mask = &f->mask->key; 3010 skip_hw = tc_skip_hw(f->flags); 3011 3012 if (fl_dump_key(skb, net, key, mask)) 3013 goto nla_put_failure_locked; 3014 3015 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3016 goto nla_put_failure_locked; 3017 3018 spin_unlock(&tp->lock); 3019 3020 if (!skip_hw) 3021 fl_hw_update_stats(tp, f, rtnl_held); 3022 3023 if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count)) 3024 goto nla_put_failure; 3025 3026 if (tcf_exts_dump(skb, &f->exts)) 3027 goto nla_put_failure; 3028 3029 nla_nest_end(skb, nest); 3030 3031 if (tcf_exts_dump_stats(skb, &f->exts) < 0) 3032 goto nla_put_failure; 3033 3034 return skb->len; 3035 3036 nla_put_failure_locked: 3037 spin_unlock(&tp->lock); 3038 nla_put_failure: 3039 nla_nest_cancel(skb, nest); 3040 return -1; 3041 } 3042 3043 static int fl_terse_dump(struct net *net, struct tcf_proto *tp, void *fh, 3044 struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) 3045 { 3046 struct cls_fl_filter *f = fh; 3047 struct nlattr *nest; 3048 bool skip_hw; 3049 3050 if (!f) 3051 return skb->len; 3052 3053 t->tcm_handle = f->handle; 3054 3055 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3056 if (!nest) 3057 goto nla_put_failure; 3058 3059 spin_lock(&tp->lock); 3060 3061 skip_hw = tc_skip_hw(f->flags); 3062 3063 if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags)) 3064 goto nla_put_failure_locked; 3065 3066 spin_unlock(&tp->lock); 3067 3068 if (!skip_hw) 3069 fl_hw_update_stats(tp, f, rtnl_held); 3070 3071 if (tcf_exts_terse_dump(skb, &f->exts)) 3072 goto nla_put_failure; 3073 3074 nla_nest_end(skb, nest); 3075 3076 return skb->len; 3077 3078 nla_put_failure_locked: 3079 spin_unlock(&tp->lock); 3080 nla_put_failure: 3081 nla_nest_cancel(skb, nest); 3082 return -1; 3083 } 3084 3085 static int fl_tmplt_dump(struct sk_buff *skb, struct net *net, void *tmplt_priv) 3086 { 3087 struct fl_flow_tmplt *tmplt = tmplt_priv; 3088 struct fl_flow_key *key, *mask; 3089 struct nlattr *nest; 3090 3091 nest = nla_nest_start_noflag(skb, TCA_OPTIONS); 3092 if (!nest) 3093 goto nla_put_failure; 3094 3095 key = &tmplt->dummy_key; 3096 mask = &tmplt->mask; 3097 3098 if (fl_dump_key(skb, net, key, mask)) 3099 goto nla_put_failure; 3100 3101 nla_nest_end(skb, nest); 3102 3103 return skb->len; 3104 3105 nla_put_failure: 3106 nla_nest_cancel(skb, nest); 3107 return -EMSGSIZE; 3108 } 3109 3110 static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q, 3111 unsigned long base) 3112 { 3113 struct cls_fl_filter *f = fh; 3114 3115 if (f && f->res.classid == classid) { 3116 if (cl) 3117 __tcf_bind_filter(q, &f->res, base); 3118 else 3119 __tcf_unbind_filter(q, &f->res); 3120 } 3121 } 3122 3123 static bool fl_delete_empty(struct tcf_proto *tp) 3124 { 3125 struct cls_fl_head *head = fl_head_dereference(tp); 3126 3127 spin_lock(&tp->lock); 3128 tp->deleting = idr_is_empty(&head->handle_idr); 3129 spin_unlock(&tp->lock); 3130 3131 return tp->deleting; 3132 } 3133 3134 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 3135 .kind = "flower", 3136 .classify = fl_classify, 3137 .init = fl_init, 3138 .destroy = fl_destroy, 3139 .get = fl_get, 3140 .put = fl_put, 3141 .change = fl_change, 3142 .delete = fl_delete, 3143 .delete_empty = fl_delete_empty, 3144 .walk = fl_walk, 3145 .reoffload = fl_reoffload, 3146 .hw_add = fl_hw_add, 3147 .hw_del = fl_hw_del, 3148 .dump = fl_dump, 3149 .terse_dump = fl_terse_dump, 3150 .bind_class = fl_bind_class, 3151 .tmplt_create = fl_tmplt_create, 3152 .tmplt_destroy = fl_tmplt_destroy, 3153 .tmplt_dump = fl_tmplt_dump, 3154 .owner = THIS_MODULE, 3155 .flags = TCF_PROTO_OPS_DOIT_UNLOCKED, 3156 }; 3157 3158 static int __init cls_fl_init(void) 3159 { 3160 return register_tcf_proto_ops(&cls_fl_ops); 3161 } 3162 3163 static void __exit cls_fl_exit(void) 3164 { 3165 unregister_tcf_proto_ops(&cls_fl_ops); 3166 } 3167 3168 module_init(cls_fl_init); 3169 module_exit(cls_fl_exit); 3170 3171 MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); 3172 MODULE_DESCRIPTION("Flower classifier"); 3173 MODULE_LICENSE("GPL v2"); 3174