/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

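/* Illustrative sketch only (not part of this header): a classful qdisc
 * typically obtains its filter block with tcf_block_get() in ->init() and
 * resolves the target class with tcf_classify() on the enqueue path.  The
 * foo_* names and the foo_sched_data layout below are hypothetical.
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static struct foo_class *foo_classify(struct sk_buff *skb,
 *					      struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		struct tcf_result res;
 *		int result;
 *
 *		result = tcf_classify(skb, rcu_dereference_bh(q->filter_list),
 *				      &res, false);
 *		if (result < 0)
 *			return NULL;		// no filter matched
 *		switch (result) {
 *		case TC_ACT_SHOT:
 *		case TC_ACT_STOLEN:
 *		case TC_ACT_QUEUED:
 *		case TC_ACT_TRAP:
 *			return NULL;		// packet consumed by an action
 *		}
 *		return (struct foo_class *)res.class;
 *	}
 *
 * tcf_block_put() releases the block again in ->destroy().
 */
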
#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

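/* A minimal usage sketch (assumed, not mandated by this header): a classifier
 * initializes the exts with its own TLV types and then validates the actions
 * attached by userspace.  TCA_FOO_ACT, TCA_FOO_POLICE and the foo_filter
 * structure are hypothetical.
 *
 *	struct foo_filter *f;
 *	int err;
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0)
 *		goto errout;
 *
 * tcf_exts_destroy() undoes the allocation done by tcf_exts_init().
 */
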
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

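/* Sketch of the deferred-destruction pattern that the comment above
 * tcf_exts_get_net() refers to (foo_* names are hypothetical): while the
 * netns is still alive, filter teardown is punted to a workqueue via
 * tcf_queue_work() so that RTNL-protected cleanup does not run from an RCU
 * callback; otherwise it is done synchronously.
 *
 *	static void foo_delete_filter_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *
 *		rtnl_lock();
 *		__foo_delete_filter(f);		// calls tcf_exts_put_net()
 *		rtnl_unlock();
 *	}
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *	else
 *		__foo_delete_filter(f);
 */
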
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *ptr;
	int nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int kind;
	int datalen;
	int (*change)(struct net *net, void *,
		      int, struct tcf_ematch *);
	int (*match)(struct sk_buff *, struct tcf_ematch *,
		     struct tcf_pkt_info *);
	void (*destroy)(struct tcf_ematch *);
	int (*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module *owner;
	struct list_head link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

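/* Typical (assumed) usage of the two indev helpers above, with a hypothetical
 * TCA_FOO_INDEV attribute: tcf_change_indev() resolves the configured device
 * name to an ifindex at change time, tcf_match_indev() compares it against
 * skb->skb_iif at classification time.
 *
 *	// configuration path
 *	if (tb[TCA_FOO_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 *	// classification path
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		return -1;	// this filter does not match
 */
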
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

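/* Sketch of how a driver commonly uses the helper above in its flow block
 * callback (foo_* names are hypothetical): offloading is refused early when
 * NETIF_F_HW_TC is off or a non-zero chain is requested.
 *
 *	static int foo_setup_tc_cls_flower(struct foo_priv *priv,
 *					   struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return foo_flower_replace(priv, f);
 *		case FLOW_CLS_DESTROY:
 *			return foo_flower_destroy(priv, f);
 *		case FLOW_CLS_STATS:
 *			return foo_flower_stats(priv, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
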
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8 *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

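/* The *_qopt_offload structures in this file are filled by the respective
 * qdisc and handed to the driver through ndo_setup_tc().  A sketch of the
 * usual pattern for the mq structure above (foo_mq_offload is an assumed
 * helper name, cf. what sch_mq does):
 *
 *	static int foo_mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
 *	{
 *		struct net_device *dev = qdisc_dev(sch);
 *		struct tc_mq_qopt_offload opt = {
 *			.command = cmd,
 *			.handle = sch->handle,
 *		};
 *
 *		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
 *			return -EOPNOTSUPP;
 *
 *		return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ,
 *						     &opt);
 *	}
 */
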
enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u16 classid;
	u32 parent_classid;
	u16 qid;
	u16 moved_qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

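/* On the driver side, ndo_setup_tc() receives one of these qdisc offload
 * structures as type_data and dispatches on the command.  A hypothetical
 * sketch for TC_SETUP_QDISC_RED (the foo_* helpers are assumptions):
 *
 *	static int foo_setup_tc_red(struct net_device *dev,
 *				    struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return foo_red_replace(dev, opt->handle, opt->parent,
 *					       &opt->set);
 *		case TC_RED_DESTROY:
 *			return foo_red_destroy(dev, opt->handle);
 *		case TC_RED_STATS:
 *			return foo_red_stats(dev, opt->handle, &opt->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
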
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif