/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

/* Walker state passed to a classifier's ->walk() callback; @fn is invoked
 * for each filter node, @skip/@count implement resume-after-partial-dump,
 * and a non-zero @stop aborts the walk.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

/* Where a tcf_block is bound on its qdisc (clsact ingress vs. egress). */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extended binding info for tcf_block_get_ext()/tcf_block_put_ext().
 * @chain_head_change is called (with @chain_head_change_priv) when the
 * head of chain 0 changes; @block_index selects a shared block by index.
 */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

/* A shared block is identified by a non-zero user-assigned index. */
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

/* Return the qdisc owning @block. Only valid for non-shared blocks
 * (hence the WARN_ON); shared blocks are not tied to a single qdisc.
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
/* Stubs for !CONFIG_NET_CLS: block get/put succeed as no-ops so qdiscs
 * build unchanged, lookups return NULL, and classification never matches.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

/* NOTE(review): these two tc_setup_cb_block_* stubs have no counterpart
 * declared in the CONFIG_NET_CLS branch above — possibly leftovers;
 * confirm against the rest of the tree.
 */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

/* Without classifiers nothing ever matches. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

/* Atomically swap the class reference in *clp, returning the old value. */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

/* Same as __cls_set_class() but under the qdisc tree lock. */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

/* Bind filter result @r to the class given by r->classid on the owning
 * qdisc, releasing any class it was previously bound to.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

/* Drop the class binding held by filter result @r, if any. */
static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

/* Per-filter container for attached actions (CONFIG_NET_CLS_ACT) plus the
 * classifier-specific TLV type mapping used when dumping.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int		nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

/* Initialize @exts; allocates the action array (GFP_KERNEL) when actions
 * are enabled. Returns 0 or -ENOMEM.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

/* Iterate over all actions attached to @exts; stops at the first NULL slot. */
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

/* Push hardware-gathered counters into every attached action's stats. */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

/* First attached action, or NULL when actions are compiled out. */
static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the related network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

/* A container ematch carries no module ops of its own. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

/* Short-circuit evaluation: stop once the tree's outcome is decided
 * (last match, failed AND operand, or satisfied OR operand).
 */
static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

/* Stubs: validation succeeds, trees are empty, every packet "matches". */
#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

/* Return a pointer to the start of the requested protocol layer in @skb,
 * or NULL for an unknown layer id.
 */
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

/* Check that [ptr, ptr + len) lies within @skb's linear data; the final
 * (ptr <= (ptr + len)) test guards against a negative @len / pointer wrap.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

/* Resolve the TCA_*_INDEV netlink attribute to an ifindex.
 * Returns the ifindex, -EINVAL on an over-long name, -ENODEV if no such
 * device exists in @net.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

/* Match @skb's incoming interface against @ifindex; an @ifindex of 0
 * matches everything, while a missing skb_iif never matches a filter.
 */
static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

/* Like tc_can_offload() but reports the failure via extack. */
static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

/* Helper for drivers that only support offloading filters on chain 0. */
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

/* Fill the common offload header from @tp; extack is only forwarded when
 * the user asked for mandatory HW offload (skip_sw) or verbose reporting.
 */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif