/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
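
/*
 * Hypothetical usage sketch (the foo_* names below are illustrative, not part
 * of this API): a qdisc that owns a classifier chain typically acquires its
 * block in ->init() and releases it in ->destroy():
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */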

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
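
/*
 * Illustrative sketch (TCA_FOO_* and the filter struct are hypothetical): a
 * classifier's ->change() pairs tcf_exts_init() with tcf_exts_validate() and
 * must call tcf_exts_destroy() on its error path:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */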

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
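
/*
 * Deferred destruction sketch (foo_* names are hypothetical): when a filter
 * is deleted, the netns reference taken here decides whether its extensions
 * may be freed from a work item or must be released synchronously because
 * the netns is already being dismantled:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 *	else
 *		foo_destroy_filter(f);
 *
 * where the work function ultimately calls tcf_exts_destroy() followed by
 * tcf_exts_put_net().
 */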

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
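
/*
 * Registration sketch (em_foo and TCF_EM_FOO are hypothetical): an ematch
 * module fills in struct tcf_ematch_ops and registers it from its module
 * init routine:
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_foo_ops.link)
 *	};
 *
 *	static int __init init_em_foo(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 */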

int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
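
/*
 * Access sketch (em_foo_match and struct foo_cfg are hypothetical): an ematch
 * ->match() callback normally resolves the layer base pointer and validates
 * the offset before touching packet data:
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		struct foo_cfg *cfg = (struct foo_cfg *)em->data;
 *		unsigned char *ptr = tcf_get_base_ptr(skb, cfg->layer) + cfg->off;
 *
 *		if (!tcf_valid_offset(skb, ptr, cfg->len))
 *			return 0;
 *
 *		return !memcmp(ptr, cfg->val, cfg->len);
 *	}
 */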

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
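
/*
 * Driver-side sketch (foo_* names are hypothetical): a block callback or
 * ndo_setup_tc() handler usually rejects offload requests early when the
 * device cannot offload or when the rule is not on chain 0:
 *
 *	static int foo_setup_tc_flower(struct foo_priv *priv,
 *				       struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */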

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif