/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

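/* Usage sketch (illustrative only, not part of this API): a qdisc that owns a
 * filter block typically acquires it in ->init(), consults it per packet and
 * releases it in ->destroy(). The "q" fields below are hypothetical qdisc
 * private data and error handling is elided:
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	...
 *	switch (tcf_classify(skb, NULL, rcu_dereference_bh(q->filter_list),
 *			     &res, false)) {
 *	case TC_ACT_OK:
 *		... map res.classid to a class or queue ...
 *	}
 *	...
 *	tcf_block_put(q->block);
 */
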
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker ns_tracker;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	netns_tracker_alloc(net, &exts->ns_tracker, GFP_KERNEL);
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

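/* Usage sketch (illustrative only, not part of this API): a classifier's
 * ->change() commonly pairs tcf_exts_init() with tcf_exts_validate() (declared
 * further below) and tears the extensions down with tcf_exts_destroy(). The
 * "f" structure and the TCA_FOO_* attribute ids are hypothetical:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	if (err < 0)
 *		tcf_exts_destroy(&f->exts);
 */
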
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 u64 bytes, u64 packets, u64 drops, u64 lastuse,
			 u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		/* if stats from hw, just skip */
		if (tcf_action_update_hw_stats(a)) {
			preempt_disable();
			tcf_action_stats_update(a, bytes, packets, drops,
						lastuse, true);
			preempt_enable();

			a->used_hw_stats = used_hw_stats;
			a->used_hw_stats_valid = used_hw_stats_valid;
		}
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

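/* Usage sketch (illustrative only, not part of this API): code that needs to
 * look at the individual actions of a filter can walk them with
 * tcf_exts_for_each_action() (defined above); "act" and whatever is done with
 * it are placeholders:
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts) {
 *		... inspect or translate "act" ...
 *	}
 */
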
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

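/* Usage sketch (illustrative only, not part of this API): classifiers that
 * support extended matches validate the tree at configuration time and
 * evaluate it per packet with tcf_em_tree_match() (both declared below). The
 * "f" filter structure and the TCA_FOO_EMATCHES attribute are hypothetical:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematch_tree);
 *	...
 *	if (!tcf_em_tree_match(skb, &f->ematch_tree, NULL))
 *		continue;	(not matched, try the next filter)
 *	...
 *	tcf_em_tree_destroy(&f->ematch_tree);
 */
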
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured or if ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[]);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

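/* Usage sketch (illustrative only, not part of this API): when a classifier
 * offers a rule to hardware it typically builds a type-specific offload
 * descriptor, converts the extensions to a flow_action with
 * tc_setup_offload_action() and then calls tc_setup_cb_add() (all declared
 * above). The TC_SETUP_CLSFOO type, the descriptor fields and "f" are
 * hypothetical:
 *
 *	err = tc_setup_offload_action(&cls_foo.rule->action, &f->exts);
 *	if (err)
 *		return err;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo,
 *			      skip_sw, &f->flags, &f->in_hw_count, true);
 */
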
#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

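/* Usage sketch (illustrative only, not part of this API): a driver's
 * classifier offload callback usually rejects what it cannot handle early,
 * using the helpers above. "foo_setup_cls" and its argument names are
 * hypothetical:
 *
 *	static int foo_setup_cls(struct net_device *dev,
 *				 struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */
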
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

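/* Usage sketch (illustrative only, not part of this API): the *_qopt_offload
 * structures below are handed to drivers through ndo_setup_tc() with the
 * matching TC_SETUP_QDISC_* type, e.g. for mq (error handling elided):
 *
 *	struct tc_mq_qopt_offload opt = {
 *		.command = TC_MQ_CREATE,
 *		.handle  = sch->handle,
 *	};
 *
 *	if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
 *		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
 */
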
enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif