/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};
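
/* Illustrative sketch (not part of this header): a mapped object is
 * typically filled and registered with the reg_c0 mapping pool so the
 * chain/sample/miss context can later be recovered from a packet's
 * metadata. The mapping_add() helper is assumed here from lib/mapping.h;
 * treat this as a hedged usage outline only.
 *
 *	struct mlx5_mapped_obj obj = {};
 *	u32 obj_id;
 *	int err;
 *
 *	obj.type = MLX5_MAPPED_OBJ_CHAIN;
 *	obj.chain = chain;
 *	err = mapping_add(esw->offloads.reg_c0_obj_pool, &obj, &obj_id);
 *	if (err)
 *		return err;
 *	// obj_id is written to reg_c0 and decoded back on the miss path.
 */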

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
	u8 ipsec_crypto_enabled: 1;
	u8 ipsec_packet_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport;

struct mlx5_devlink_port {
	struct devlink_port dl_port;
	struct mlx5_vport *vport;
};

static inline void mlx5_devlink_port_init(struct mlx5_devlink_port *dl_port,
					  struct mlx5_vport *vport)
{
	dl_port->vport = vport;
}

static inline struct mlx5_devlink_port *mlx5_devlink_port_get(struct devlink_port *dl_port)
{
	return container_of(dl_port, struct mlx5_devlink_port, dl_port);
}

static inline struct mlx5_vport *mlx5_devlink_port_vport_get(struct devlink_port *dl_port)
{
	return mlx5_devlink_port_get(dl_port)->vport;
}
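
/* Illustrative sketch (assumption, not part of the driver): because
 * struct mlx5_devlink_port embeds its struct devlink_port, a devlink
 * callback that receives the generic port can recover the owning vport
 * via container_of() through the helpers above, e.g.:
 *
 *	static int my_port_fn_get(struct devlink_port *port)	// hypothetical callback
 *	{
 *		struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 *
 *		return vport ? 0 : -EOPNOTSUPP;
 *	}
 */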

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct mlx5_devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
	u16 num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, e.g. SRIOV state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
	u16 enabled_ipsec_vf_count;
	bool eswitch_operation_in_progress;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		bool vport_valid;
		u16 vport;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
#ifdef CONFIG_XFRM_OFFLOAD
int mlx5_devlink_port_fn_ipsec_crypto_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_crypto_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_enabled,
					  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
					  struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
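
/* Hedged example (illustrative only): callers gate VLAN push/pop offload
 * on the FDB capabilities checked above, e.g. requiring double-VLAN
 * (QinQ) support before offloading two-tag actions:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */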

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
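
/* Illustrative note (not part of the API): the devlink port index packs
 * the function's vhca_id into the upper 16 bits and the vport number
 * into the lower 16 bits, so the vport number round-trips through the
 * index. A hedged sketch, assuming vhca_id 0x12 and vport 5:
 *
 *	idx = mlx5_esw_vport_to_devlink_port_index(dev, 5);
 *		// idx == (0x12 << 16) | 5 == 0x120005
 *	vport_num = mlx5_esw_devlink_port_index_to_vport_num(idx);
 *		// vport_num == 5
 */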

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after the vports are initialized in
 * mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for a
 * desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled.
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)
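
/* Usage sketch (illustrative only, assuming an initialized eswitch and a
 * hypothetical per-vport helper): walk the VF vports up to and including
 * index max_vfs via the mark-based iterator above.
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, max_vfs) {
 *		if (!vport->enabled)
 *			continue;
 *		my_handle_vf_vport(esw, vport);	// hypothetical helper
 *	}
 */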

struct mlx5_eswitch *__must_check
mlx5_devlink_eswitch_get(struct devlink *devlink);

struct mlx5_eswitch *mlx5_devlink_eswitch_nocheck_get(struct devlink *devlink);

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_eswitch_is_pf_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
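
/* Usage sketch (illustrative, hedged): a per-vport table is looked up or
 * created via mlx5_esw_vporttbl_get() and released with a matching
 * mlx5_esw_vporttbl_put() call using the same attributes. The namespace
 * values below are placeholders, not recommendations.
 *
 *	struct esw_vport_tbl_namespace ns = {
 *		.max_fte = 128,
 *		.max_num_groups = 4,
 *	};
 *	struct mlx5_vport_tbl_attr attr = {
 *		.chain = 0,
 *		.prio = 1,
 *		.vport = vport_num,
 *		.vport_ns = &ns,
 *	};
 *	struct mlx5_flow_table *ft;
 *
 *	ft = mlx5_esw_vporttbl_get(esw, &attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	...
 *	mlx5_esw_vporttbl_put(esw, &attr);
 */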

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_init_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_cleanup_pf_vf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_init_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
				  struct mlx5_devlink_port *dl_port,
				  u32 controller, u32 sfnum);
void mlx5_esw_offloads_cleanup_sf_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_eswitch_load_sf_vport(struct mlx5_eswitch *esw, u16 vport_num,
			       enum mlx5_eswitch_vport_event enabled_events,
			       struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum);
void mlx5_eswitch_unload_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport);
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport);

int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
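
/* Hedged usage sketch (names prefixed "my_" are hypothetical): a consumer
 * interested in eswitch mode transitions registers a notifier_block and
 * reads the new mode from the mlx5_esw_event_info payload.
 *
 *	static int my_esw_mode_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		pr_debug("eswitch mode changing to %u\n", info->new_mode);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_esw_nb = {
 *		.notifier_call = my_esw_mode_event,
 *	};
 *
 *	err = mlx5_esw_event_notifier_register(esw, &my_esw_nb);
 */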

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
int mlx5_esw_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

int mlx5_eswitch_restore_ipsec_rule(struct mlx5_eswitch *esw, struct mlx5_flow_handle *rule,
				    struct mlx5_esw_flow_attr *esw_attr, int attr_idx);
bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev);
bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev);
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev,
				  struct mlx5_vport *vport);
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable);
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num);
void mlx5_esw_vport_ipsec_offload_enable(struct mlx5_eswitch *esw);
void mlx5_esw_vport_ipsec_offload_disable(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {}
static inline bool mlx5_eswitch_block_ipsec(struct mlx5_core_dev *dev)
{
	return false;
}

static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */