/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
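
/* Illustrative sketch only (hypothetical foo_* names): a classifier's
 * ->walk() callback usually iterates its filters and lets
 * tc_cls_stats_dump() handle the skip/count/stop bookkeeping of the
 * walker:
 *
 *	static void foo_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			     bool rtnl_held)
 *	{
 *		struct foo_filter *f;
 *
 *		list_for_each_entry(f, &head->flist, link) {
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *		}
 *	}
 */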

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker ns_tracker;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	/* Note: we do not own yet a reference on net.
	 * This reference might be taken later from tcf_exts_get_net().
	 */
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}
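
/* Illustrative sketch only (hypothetical foo_* names): classifiers commonly
 * pair tcf_exts_get_net() with tcf_queue_work() so that filters holding
 * actions are freed from a work item, and fall back to synchronous
 * destruction when the netns is already being dismantled:
 *
 *	static void foo_delete_filter(struct foo_filter *f)
 *	{
 *		if (tcf_exts_get_net(&f->exts))
 *			tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *		else
 *			__foo_delete_filter(f);
 *	}
 *
 * where __foo_delete_filter() calls tcf_exts_destroy() followed by
 * tcf_exts_put_net() before freeing the filter.
 */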

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)

static inline bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 struct flow_stats *stats,
			 bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		if (use_act_stats || tc_act_in_hw(a)) {
			if (!tcf_action_update_hw_stats(a))
				continue;
		}

		preempt_disable();
		tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
					stats->lastused, true);
		preempt_enable();

		a->used_hw_stats = stats->used_hw_stats;
		a->used_hw_stats_valid = stats->used_hw_stats_valid;
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
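
/* Illustrative sketch only (hypothetical foo_* names): the typical shape of
 * a classifier's ->classify() callback, combining an ematch tree check (see
 * tcf_em_tree_match() below) with action execution:
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		int r;
 *
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;	// treated as "no match", keep looking
 *		return r;
 *	}
 *	return -1;			// no filter matched
 */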

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
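
/* Illustrative sketch only: registering an ematch module, modelled on the
 * in-tree em_* modules; the em_foo names and TCF_EM_FOO kind are
 * hypothetical.
 *
 *	static int em_foo_match(struct sk_buff *skb, struct tcf_ematch *em,
 *				struct tcf_pkt_info *info)
 *	{
 *		return 1;	// 1 = match, 0 = no match
 *	}
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_foo_ops.link)
 *	};
 *
 * Module init/exit then call tcf_em_register(&em_foo_ops) and
 * tcf_em_unregister(&em_foo_ops), declared below.
 */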

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
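
/* Illustrative sketch only: the usual indev pattern (as used by e.g.
 * cls_u32). At change() time the TCA_*_INDEV attribute is resolved to an
 * ifindex, which is stored in the filter and checked per packet. The
 * TCA_FOO_INDEV attribute and the f/filter names below are hypothetical.
 *
 *	// change():
 *	if (tb[TCA_FOO_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 *	// classify():
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;	// this filter does not apply
 */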

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}
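
/* Illustrative sketch only: a driver's ndo_setup_tc() / flow block callback
 * would typically gate classifier offload requests like this (the foo_*
 * names are hypothetical):
 *
 *	static int foo_setup_tc_cls_flower(struct foo_priv *priv,
 *					   struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *		// ... translate and program the rule ...
 *	}
 *
 * tc_cls_can_offload_and_chain0() (below) combines the NETIF_F_HW_TC check
 * with a chain-0-only restriction and fills in extack on failure.
 */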

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
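
/* Illustrative sketch only: how a classifier typically drives the hardware
 * offload path with the helpers above, modelled on cls_u32; the local
 * variable names (n, flags) are hypothetical.
 *
 *	struct tc_cls_u32_offload cls_u32 = {};
 *	bool skip_sw = tc_skip_sw(flags);
 *	int err;
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	// ... fill in cls_u32.knode ...
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
 *			      &n->flags, &n->in_hw_count, true);
 *	if (err)
 *		return err;
 *	if (skip_sw && !tc_in_hw(n->flags))
 *		return -EINVAL;	// nothing offloaded and SW path disallowed
 */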

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	bool use_act_stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u64 rate;
	u64 ceil;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif