#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

/* Generic key/mask pair; the pointers index into the rule's dissector
 * key layout.
 */
struct flow_match {
	struct flow_dissector *dissector;
	void *mask;
	void *key;
};

/* Typed key/mask views, one per supported flow_dissector key. */
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_ports_range {
	struct flow_dissector_key_ports_range *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};

struct flow_match_pppoe {
	struct flow_dissector_key_pppoe *key, *mask;
};

struct flow_match_l2tpv3 {
	struct flow_dissector_key_l2tpv3 *key, *mask;
};

struct flow_rule;

/* Extract the key/mask pair for one match type out of a flow_rule. */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);
void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out);
void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out);

enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_RX_QUEUE_MAPPING,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	FLOW_ACTION_PPPOE_PUSH,
	FLOW_ACTION_JUMP,
	FLOW_ACTION_PIPE,
	FLOW_ACTION_VLAN_PUSH_ETH,
	FLOW_ACTION_VLAN_POP_ETH,
	FLOW_ACTION_CONTINUE,
	NUM_FLOW_ACTIONS,
};

/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,

	FLOW_ACTION_HW_STATS_NUM_BITS
};

/* Bitmask values built from the bit indexes above; DONT_CARE is all bits. */
enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];	/* flexible array, cookie_len bytes */
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
	enum flow_action_id		id;
	u32				hw_index;
	enum flow_action_hw_stats	hw_stats;
	action_destr			destructor;
	void				*destructor_priv;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_VLAN_PUSH_ETH */
			unsigned char dst[ETH_ALEN];
			unsigned char src[ETH_ALEN];
		} vlan_push_eth;
		struct {				/* FLOW_ACTION_MANGLE */
							/* FLOW_ACTION_ADD */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		struct ip_tunnel_info	*tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		u16			ptype;		/* FLOW_ACTION_PTYPE */
		u16			rx_queue;	/* FLOW_ACTION_RX_QUEUE_MAPPING */
		u32			priority;	/* FLOW_ACTION_PRIORITY */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			u32			burst;
			u64			rate_bytes_ps;
			u64			peakrate_bytes_ps;
			u32			avrate;
			u16			overhead;
			u64			burst_pkt;
			u64			rate_pkt_ps;
			u32			mtu;
			struct {
				enum flow_action_id	act_id;
				u32			extval;
			} exceed, notexceed;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int action;
			u16 zone;
			struct nf_flowtable *flow_table;
		} ct;
		struct {				/* FLOW_ACTION_CT_METADATA */
			unsigned long cookie;
			u32 mark;
			u32 labels[4];
			bool orig_dir;
		} ct_metadata;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
		struct {				/* FLOW_ACTION_GATE */
			s32		prio;
			u64		basetime;
			u64		cycletime;
			u64		cycletimeext;
			u32		num_entries;
			struct action_gate_entry *entries;
		} gate;
		struct {				/* FLOW_ACTION_PPPOE_PUSH */
			u16		sid;
		} pppoe;
	};
	struct flow_action_cookie *cookie; /* user defined action cookie */
};

struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

static inline bool flow_action_is_last_entry(const struct flow_action *action,
					     const struct flow_action_entry *entry)
{
	return entry == &action->entries[action->num_entries - 1];
}

/* Iterate over all entries of a flow_action; __i is the index,
 * __act points at the current entry.
 */
#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])

static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *action_entry;
	u8 last_hw_stats;
	int i;

	if (flow_offload_has_one_action(action))
		return true;

	flow_action_for_each(i, action_entry, action) {
		/* i == 0 only seeds last_hw_stats; comparison starts at i == 1 */
		if (i && action_entry->hw_stats != last_hw_stats) {
			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
			return false;
		}
		last_hw_stats = action_entry->hw_stats;
	}
	return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
	WARN_ON(!flow_action_has_entries(action));
	return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
			     struct netlink_ext_ack *extack,
			     bool check_allow_bit,
			     enum flow_action_hw_stats_bit allow_bit)
{
	const struct flow_action_entry *action_entry;

	if (!flow_action_has_entries(action))
		return true;
	if (!flow_action_mixed_hw_stats_check(action, extack))
		return false;

	action_entry = flow_action_first_entry_get(action);

	/* Zero is not a legal value for hw_stats, catch anyone passing it */
	WARN_ON_ONCE(!action_entry->hw_stats);

	if (!check_allow_bit &&
	    ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
		return false;
	} else if (check_allow_bit &&
		   !(action_entry->hw_stats & BIT(allow_bit))) {
		NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
		return false;
	}
	return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
			   enum flow_action_hw_stats_bit allow_bit)
{
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	return __flow_action_hw_stats_check(action, extack, false, 0);
}

struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	drops;
	u64	lastused;
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts,
				     u64 drops, u64 lastused,
				     enum flow_action_hw_stats used_hw_stats)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->drops	+= drops;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);

	/* The driver should pass value with a maximum of one bit set.
	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
	 */
	WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
	flow_stats->used_hw_stats |= used_hw_stats;
	flow_stats->used_hw_stats_valid = true;
}

enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
	struct Qdisc *sch;
	struct list_head *cb_list_head;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct flow_block_cb;

/* State for an indirectly-bound block callback. */
struct flow_block_indr {
	struct list_head		list;
	struct net_device		*dev;
	struct Qdisc			*sch;
	enum flow_block_binder_type	binder_type;
	void				*data;
	void				*cb_priv;
	void				(*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	struct flow_block_indr	indr;
	unsigned int		refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
					     struct flow_block_offload *offload)
{
	list_del(&block_cb->indr.list);
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);

enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

enum offload_act_command  {
	FLOW_ACT_REPLACE,
	FLOW_ACT_DESTROY,
	FLOW_ACT_STATS,
};

struct flow_offload_action {
	struct netlink_ext_ack *extack; /* NULL in FLOW_ACT_STATS process*/
	enum offload_act_command  command;
	enum flow_action_id id;
	u32 index;
	struct flow_stats stats;
	struct flow_action action;
};

struct flow_offload_action *offload_action_alloc(unsigned int num_actions);

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
				      enum tc_setup_type type, void *type_data,
				      void *data,
				      void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb));
bool flow_indr_dev_exists(void);

#endif /* _NET_FLOW_OFFLOAD_H */