/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
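
/* Illustrative sketch, compiled out: how a driver would typically bind a
 * per-block callback to a tcf_block from its ndo_setup_tc() TC_SETUP_BLOCK
 * handler. Everything prefixed "my_" is hypothetical driver code; only
 * tcf_block_cb_register()/tcf_block_cb_unregister() and struct
 * tc_block_offload (declared further down in this header) are real APIs.
 */
#if 0
static int my_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				void *cb_priv)
{
	switch (type) {
	case TC_SETUP_CLSFLOWER:
		/* type_data is a struct tc_cls_flower_offload; program HW */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int my_setup_tc_block(struct net_device *dev,
			     struct tc_block_offload *f)
{
	void *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, my_setup_tc_block_cb,
					     priv, priv, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, my_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif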

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
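
/* Illustrative sketch, compiled out: the deferred-destroy pattern that the
 * tcf_exts_get_net() comment above refers to, as used from a classifier's
 * ->delete()/->destroy() paths. "struct my_filter" and the my_*() helpers
 * are hypothetical; tcf_queue_work(), tcf_exts_get_net()/tcf_exts_put_net()
 * and tcf_exts_destroy() are the real APIs from this header.
 */
#if 0
struct my_filter {
	struct tcf_exts exts;
	struct rcu_work rwork;
};

static void __my_destroy_filter(struct my_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);	/* no-op if the netns ref was not taken */
	kfree(f);
}

static void my_destroy_filter_work(struct work_struct *work)
{
	struct my_filter *f = container_of(to_rcu_work(work),
					   struct my_filter, rwork);

	rtnl_lock();
	__my_destroy_filter(f);
	rtnl_unlock();
}

static void my_destroy_filter(struct my_filter *f)
{
	/* If the netns is already being dismantled, free synchronously to
	 * avoid racing with tc_action_net_exit(); otherwise defer past an
	 * RCU grace period.
	 */
	if (tcf_exts_get_net(&f->exts))
		tcf_queue_work(&f->rwork, my_destroy_filter_work);
	else
		__my_destroy_filter(f);
}
#endif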

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
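
/* Illustrative sketch, compiled out: the shape of a classifier's ->classify()
 * callback built on tcf_em_tree_match() (declared further down) and
 * tcf_exts_exec(). The my_* types are hypothetical; the pattern mirrors the
 * simple list-based classifiers.
 */
#if 0
struct my_cls_filter {
	struct list_head	link;
	struct tcf_ematch_tree	ematches;
	struct tcf_exts		exts;
	struct tcf_result	res;
};

struct my_cls_head {
	struct list_head	filters;
};

static int my_cls_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct my_cls_head *head = rcu_dereference_bh(tp->root);
	struct my_cls_filter *f;

	list_for_each_entry_rcu(f, &head->filters, link) {
		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;
		*res = f->res;
		/* TC_ACT_* from the actions, or negative for "keep looking" */
		return tcf_exts_exec(skb, &f->exts, res);
	}
	return -1;	/* no filter matched */
}
#endif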

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logic relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
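
/* Illustrative sketch, compiled out: the skeleton of an ematch module built
 * on the ops and registration helpers above. TCF_EM_MYMATCH is a placeholder
 * for a real TCF_EM_* id from the uapi header; all my_* names are
 * hypothetical.
 */
#if 0
static int my_em_match(struct sk_buff *skb, struct tcf_ematch *em,
		       struct tcf_pkt_info *info)
{
	/* must return 1 on match, 0 otherwise; em->data holds the config */
	return skb->len > em->data;
}

static struct tcf_ematch_ops my_em_ops = {
	.kind	= TCF_EM_MYMATCH,	/* hypothetical TCF_EM_* id */
	.match	= my_em_match,
	.owner	= THIS_MODULE,
	.link	= LIST_HEAD_INIT(my_em_ops.link),
};

static int __init my_em_init(void)
{
	return tcf_em_register(&my_em_ops);
}

static void __exit my_em_exit(void)
{
	tcf_em_unregister(&my_em_ops);
}

module_init(my_em_init);
module_exit(my_em_exit);
MODULE_ALIAS_TCF_EMATCH(TCF_EM_MYMATCH);
#endif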

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
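
/* Illustrative sketch, compiled out: tcf_change_indev() is meant for a
 * classifier's ->change() path (resolve an interface-name attribute to an
 * ifindex once), tcf_match_indev() for the fast path (compare against
 * skb->skb_iif). The function and the out-parameter wiring are hypothetical.
 */
#if 0
static int my_parse_indev(struct net *net, struct nlattr *indev_tlv,
			  int *ifindex, struct netlink_ext_ack *extack)
{
	int ret;

	if (!indev_tlv)
		return 0;	/* attribute absent: match any device */

	ret = tcf_change_indev(net, indev_tlv, extack);
	if (ret < 0)
		return ret;
	*ifindex = ret;
	return 0;
}

/* ...and at classification time:
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		return -1;
 */
#endif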

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}
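
/* Illustrative sketch, compiled out: how a driver block callback would handle
 * TC_SETUP_CLSFLOWER commands using the helpers above. my_setup_flower() and
 * the HW-programming comments are hypothetical; tc_cls_can_offload_and_chain0(),
 * tc_cls_flower_offload_flow_rule() and the flow_rule_match_*() helpers from
 * <net/flow_offload.h> are real.
 */
#if 0
static int my_setup_flower(struct net_device *dev,
			   struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_basic match;

	if (!tc_cls_can_offload_and_chain0(dev, &f->common))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSFLOWER_REPLACE:
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
			flow_rule_match_basic(rule, &match);
			/* program HW from match.key/match.mask and
			 * rule->action, keyed on f->cookie
			 */
		}
		return 0;
	case TC_CLSFLOWER_DESTROY:
		/* remove the HW entry keyed on f->cookie */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif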

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
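
/* Illustrative sketch, compiled out: qdisc offload structures such as
 * tc_red_qopt_offload are passed to drivers through ndo_setup_tc() (typically
 * with type TC_SETUP_QDISC_RED). The my_* function and the HW-programming
 * comments are hypothetical.
 */
#if 0
static int my_setup_tc_red(struct net_device *dev,
			   struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		/* program thresholds and probability from opt->set.min,
		 * opt->set.max and opt->set.probability for qdisc opt->handle
		 */
		return 0;
	case TC_RED_DESTROY:
		/* tear down the offload for qdisc opt->handle */
		return 0;
	case TC_RED_STATS:
		/* add HW counters into opt->stats.bstats / opt->stats.qstats */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif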

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is changed to a non-offloadable config,
	 * it needs to update the backlog & qlen values to negate the HW
	 * backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif