/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif
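
/* Example (illustrative only, not part of the API): a classful qdisc
 * typically acquires a filter block in its ->init() callback and releases
 * it in ->destroy(), then resolves a class in ->enqueue() via tcf_classify().
 * The "foo" qdisc and its private struct below are hypothetical placeholders;
 * error handling is trimmed for brevity.
 *
 *	struct foo_sched_data {
 *		struct tcf_proto __rcu *filter_list;
 *		struct tcf_block *block;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 *
 *	static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *			       struct sk_buff **to_free)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *		struct tcf_result res;
 *
 *		if (fl && tcf_classify(skb, fl, &res, false) >= 0) {
 *			// res.classid selects the target class
 *		}
 *		...
 *	}
 */
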
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
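
/* Example (illustrative only): a classifier's ->change() callback normally
 * pairs tcf_exts_init() with tcf_exts_validate() (declared further below)
 * and tears the extensions down again with tcf_exts_destroy() on failure.
 * The filter struct "f" and the TCA_FOO_* attribute ids are placeholders.
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */
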
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
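
/* Example (illustrative only): the usual deferred-destruction pattern in a
 * classifier's delete path. If tcf_exts_get_net() fails, the netns is going
 * away and the filter must be freed synchronously; otherwise destruction is
 * deferred to a work item via tcf_queue_work() and tcf_exts_put_net() is
 * called from that work function. The __foo_destroy() helper, the work
 * function and the rwork member are placeholders.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_destroy_work);
 *	else
 *		__foo_destroy(f);
 */
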
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched, or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
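
/* Example (illustrative only): the skeleton of an ematch module. Only .kind,
 * .match and .owner are mandatory; "em_foo" and TCF_EM_FOO are placeholders
 * (real kind values live in include/uapi/linux/pkt_cls.h).
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		return 1;	// 1 = match, 0 = no match
 *	}
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init init_em_foo(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 *
 *	static void __exit exit_em_foo(void)
 *	{
 *		tcf_em_unregister(&em_foo_ops);
 *	}
 *
 *	MODULE_ALIAS_TCF_EMATCH(TCF_EM_FOO);
 */
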
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
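
/* Example (illustrative only): how a classifier typically uses the two indev
 * helpers above. tcf_change_indev() resolves the indev attribute to an
 * ifindex at configuration time; tcf_match_indev() tests it per packet in
 * ->classify(). The TCA_FOO_INDEV attribute and the f->ifindex member are
 * placeholders.
 *
 *	// configuration time
 *	if (tb[TCA_FOO_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 *	// classification time
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;	// try the next filter
 *	*res = f->res;
 *	return tcf_exts_exec(skb, &f->exts, res);
 */
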
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
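
/* Example (illustrative only): a driver's flow block callback normally starts
 * by rejecting rules it cannot handle with tc_cls_can_offload_and_chain0()
 * before dispatching on the command. foo_flower_replace()/foo_flower_destroy()
 * are placeholders for driver code.
 *
 *	static int foo_setup_tc_flower(struct net_device *dev,
 *				       struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return foo_flower_replace(dev, f);
 *		case FLOW_CLS_DESTROY:
 *			return foo_flower_destroy(dev, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
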
static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
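
/* Example (illustrative only): how a driver might dispatch a RED offload
 * request received through ->ndo_setup_tc(TC_SETUP_QDISC_RED, ...). The
 * foo_red_*() helpers are placeholders for driver code.
 *
 *	static int foo_setup_tc_red(struct net_device *dev,
 *				    struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return foo_red_replace(dev, opt->parent, &opt->set);
 *		case TC_RED_DESTROY:
 *			foo_red_destroy(dev, opt->parent);
 *			return 0;
 *		case TC_RED_STATS:
 *			return foo_red_stats(dev, opt->parent, &opt->stats);
 *		case TC_RED_XSTATS:
 *			return foo_red_xstats(dev, opt->parent, opt->xstats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
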
enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif