/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

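/*
 * Illustrative sketch (not part of this header's API): a qdisc that supports
 * filters typically acquires its block in ->init() and releases it in
 * ->destroy(). The "foo" qdisc and its foo_sched_data below are hypothetical;
 * real users are qdiscs such as prio or multiq.
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
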
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

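/*
 * Illustrative sketch (hypothetical "foo" classifier, with made-up TCA_FOO_*
 * attributes): the usual ->change() pattern is to initialize the extensions,
 * validate the action attributes and bind the filter to its class, destroying
 * the extensions again on error. tcf_exts_validate(), tcf_bind_filter() and
 * tcf_exts_destroy() are declared further down in this header.
 *
 *	static int foo_set_parms(struct net *net, struct tcf_proto *tp,
 *				 struct foo_filter *f, unsigned long base,
 *				 struct nlattr **tb, struct nlattr *est,
 *				 bool ovr, struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *		if (err < 0)
 *			return err;
 *
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
 *					extack);
 *		if (err < 0) {
 *			tcf_exts_destroy(&f->exts);
 *			return err;
 *		}
 *
 *		if (tb[TCA_FOO_CLASSID]) {
 *			f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *			tcf_bind_filter(tp, &f->res, base);
 *		}
 *		return 0;
 *	}
 */
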
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

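/*
 * Illustrative sketch (hypothetical "foo" classifier): deferred filter
 * destruction only makes sense while the netns is still alive, so the usual
 * pattern is to queue the work via tcf_queue_work() only when
 * tcf_exts_get_net() succeeds and to clean up synchronously otherwise.
 *
 *	static void __foo_delete_filter(struct foo_filter *f)
 *	{
 *		tcf_exts_destroy(&f->exts);
 *		tcf_exts_put_net(&f->exts);
 *		kfree(f);
 *	}
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *
 *		rtnl_lock();
 *		__foo_delete_filter(f);
 *		rtnl_unlock();
 *	}
 *
 *	static void foo_delete_filter(struct foo_filter *f)
 *	{
 *		if (tcf_exts_get_net(&f->exts))
 *			tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *		else
 *			__foo_delete_filter(f);
 *	}
 */
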
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

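/*
 * Illustrative sketch: a classifier that offloaded a rule can pull the
 * hardware counters through its block callbacks and feed them back into the
 * actions with tcf_exts_stats_update(). The matchall-style STATS command is
 * just one possible carrier; "head", "cookie" and "block" come from the
 * surrounding classifier context, and tc_setup_cb_call() is declared further
 * down in this header.
 *
 *	struct tc_cls_matchall_offload cls_mall = {};
 *
 *	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
 *	cls_mall.command = TC_CLSMATCHALL_STATS;
 *	cls_mall.cookie = cookie;
 *
 *	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
 *
 *	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
 *			      cls_mall.stats.pkts, cls_mall.stats.drops,
 *			      cls_mall.stats.lastused,
 *			      cls_mall.stats.used_hw_stats,
 *			      cls_mall.stats.used_hw_stats_valid);
 */
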
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel; otherwise
 * 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

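/*
 * Illustrative sketch (hypothetical "foo" classifier): a typical ->classify()
 * implementation walks its filters, uses tcf_em_tree_match() to evaluate the
 * ematch tree and, on a match, hands the verdict over to tcf_exts_exec().
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_head *head = rcu_dereference_bh(tp->root);
 *		struct foo_filter *f;
 *
 *		list_for_each_entry_rcu(f, &head->flist, link) {
 *			if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *				continue;
 *			*res = f->res;
 *			return tcf_exts_exec(skb, &f->exts, res);
 *		}
 *		return -1;
 *	}
 */
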
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

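/*
 * Illustrative sketch (hypothetical "foo" classifier): offloading a filter
 * usually means initializing the common fields, issuing the driver callbacks
 * via tc_setup_cb_add() and then honouring the skip_sw/in_hw flags. The
 * TC_SETUP_CLSFOO type, FOO_CLS_REPLACE command and foo_cls_offload structure
 * are made up for the example.
 *
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	struct foo_cls_offload cls_foo = {};
 *	int err;
 *
 *	tc_cls_common_offload_init(&cls_foo.common, tp, f->flags, extack);
 *	cls_foo.command = FOO_CLS_REPLACE;
 *	cls_foo.cookie = (unsigned long)f;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo, skip_sw,
 *			      &f->flags, &f->in_hw_count, true);
 *	if (err)
 *		return err;
 *
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		return -EINVAL;
 */
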
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

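/*
 * Illustrative sketch: a driver consumes the qdisc offload structures above in
 * its ndo_setup_tc() handler by switching on the command and, for STATS,
 * filling in the bstats/qstats pointers supplied by the core. All foo_*
 * helpers are hypothetical.
 *
 *	static int foo_setup_tc_mq(struct net_device *dev,
 *				   struct tc_mq_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_MQ_CREATE:
 *			return foo_mq_create(dev, opt->handle);
 *		case TC_MQ_DESTROY:
 *			return foo_mq_destroy(dev, opt->handle);
 *		case TC_MQ_STATS:
 *			foo_mq_fill_stats(dev, opt->stats.bstats,
 *					  opt->stats.qstats);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
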
enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u16 classid;
	u32 parent_classid;
	u16 qid;
	u16 moved_qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

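/*
 * Illustrative sketch: when a driver un-offloads a prio (or gred) instance it
 * is expected to subtract the backlog/qlen it previously reported for the
 * hardware queues, so the core stops accounting for packets that only ever
 * existed in HW. The foo_* names and counters are hypothetical.
 *
 *	static void foo_prio_unoffload(struct foo_port *port,
 *				       struct tc_prio_qopt_offload_params *p)
 *	{
 *		p->qstats->qlen -= port->hw_qlen;
 *		p->qstats->backlog -= port->hw_backlog;
 *	}
 */
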
struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif