/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
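
/* A typical consumer of the block callback API is a driver's
 * .ndo_setup_tc(TC_SETUP_BLOCK) handler, which binds and unbinds a
 * per-block callback.  The sketch below is illustrative only; the
 * "foo_*" names are hypothetical, not part of this API.
 *
 *	static int foo_setup_tc_block(struct foo_priv *priv,
 *				      struct tc_block_offload *f)
 *	{
 *		if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block,
 *						     foo_setup_tc_block_cb,
 *						     priv, priv, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block,
 *						foo_setup_tc_block_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */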

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
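
/* A classful qdisc typically calls tcf_classify() from its .enqueue()
 * path to map a packet to one of its classes; TC_ACT_SHOT, TC_ACT_STOLEN
 * and TC_ACT_QUEUED mean an action consumed the packet.  Minimal sketch,
 * "foo_*" names hypothetical and error handling abbreviated:
 *
 *	static struct foo_class *foo_classify(struct sk_buff *skb,
 *					      struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *		struct tcf_result res;
 *
 *		if (!fl)
 *			return NULL;
 *		switch (tcf_classify(skb, fl, &res, false)) {
 *		case TC_ACT_SHOT:
 *		case TC_ACT_STOLEN:
 *		case TC_ACT_QUEUED:
 *			return NULL;
 *		}
 *		return (struct foo_class *)res.class;
 *	}
 */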

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
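
/* Classifier-side usage sketch (the "TCA_FOO_*" attribute names are
 * hypothetical): initialize the extension block, then validate the
 * user-supplied actions, tearing everything down again on failure.
 *
 *	err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, rate_tlv, &f->exts, ovr,
 *				extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */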

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}
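
/* Illustrative iteration over the configured actions, e.g. when a
 * driver translates them into hardware rules.  is_tcf_gact_shot() is
 * a real helper from <net/tc_act/tc_gact.h>; foo_add_drop_rule() is
 * hypothetical.
 *
 *	const struct tc_action *a;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			return foo_add_drop_rule(priv);
 *	}
 */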

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
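
/* Most classifiers end their classify() callback with this idiom once
 * a filter has matched: publish the filter's result, then let the
 * attached actions decide the final verdict (sketch only).
 *
 *	*res = f->res;
 *	return tcf_exts_exec(skb, &f->exts, res);
 */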

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */
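
/* A classifier that supports ematches simply evaluates the tree for
 * each candidate filter; cls_basic follows roughly this pattern
 * (illustrative sketch, "f" being the filter under consideration):
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */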

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};
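
/* Driver-side sketch of a flower offload entry point: refuse anything
 * the hardware cannot honour (offload disabled, non-zero chain), then
 * dispatch on the command.  All "foo_*" names are hypothetical.
 *
 *	static int foo_setup_tc_cls_flower(struct foo_port *port,
 *					   struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(port->dev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_CLSFLOWER_REPLACE:
 *			return foo_flower_replace(port, f);
 *		case TC_CLSFLOWER_DESTROY:
 *			foo_flower_destroy(port, f);
 *			return 0;
 *		case TC_CLSFLOWER_STATS:
 *			return foo_flower_stats(port, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */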

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is changed to a non-offloadable
	 * configuration, it needs to update the backlog & qlen values
	 * to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif