/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
	MLX5_MAPPED_OBJ_ACT_MISS,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		u64 act_miss_cookie;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		/* Optional group to add a drop all rule */
		struct mlx5_flow_group *drop_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
		struct mlx5_flow_handle *drop_rule;
	} offloads;
};

enum vport_egress_acl_type {
	VPORT_EGRESS_ACL_TYPE_DEFAULT,
	VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	enum vport_egress_acl_type type;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct xarray bounce_rules;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
	u8 roce_enabled: 1;
	u8 mig_enabled: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

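/*
 * The legacy/offloads members of mlx5_eswitch_fdb above are a union
 * selected by the current eswitch mode. A minimal access sketch
 * (hypothetical caller, not a verbatim driver snippet):
 *
 *	if (esw->mode == MLX5_ESWITCH_LEGACY)
 *		fdb = esw->fdb_table.legacy.fdb;
 *	else
 *		fdb = mlx5_eswitch_get_slow_fdb(esw);
 *
 * mlx5_eswitch_get_slow_fdb() is defined later in this header.
 */
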
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_flow_group *vport_rx_drop_group;
	struct mlx5_flow_handle *vport_rx_drop_rule;
	struct mlx5_flow_table *ft_ipsec_tx_pol;
	struct xarray vport_reps;
	struct list_head peer_flows[MLX5_MAX_PORTS];
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	u64 num_block_encap;
	u64 num_block_mode;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
	u16 num_ec_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
	MLX5_ESWITCH_VPORT_ACL_NS_CREATED = BIT(2),
};

struct mlx5_esw_bridge_offloads;

enum {
	MLX5_ESW_FDB_CREATED = BIT(0),
};

struct dentry;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct dentry *debugfs_root;
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */

		/* Protected by esw->state_lock.
		 * Initially 0, meaning no QoS users and QoS is disabled.
		 */
		refcount_t refcnt;
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	u8 num_peers;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct xarray paired;
	struct mlx5_devcom_comp_dev *devcom;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key);
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

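/*
 * Termination table flow, as a minimal sketch (hypothetical caller;
 * 'esw', 'attr', 'esw_attr', 'flow_act', 'spec', 'fdb', 'dest' and
 * 'num_dest' are assumed to be set up elsewhere):
 *
 *	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
 *		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
 *						     &flow_act, dest, num_dest);
 *
 * The matching release path is mlx5_eswitch_termtbl_put() on each
 * dests[i].termtbl handle populated by the add call.
 */
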
struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct ethhdr eth;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);

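/*
 * The mlx5_devlink_port_fn_* helpers below back the devlink port
 * function attribute callbacks (hw_addr, RoCE, migratable). A typical
 * userspace trigger looks like the following sketch (the PCI address
 * and port index are illustrative only):
 *
 *	devlink port function set pci/0000:06:00.0/1 hw_addr 00:11:22:33:44:55
 */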
int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
				     u8 *hw_addr, int *hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
				     const u8 *hw_addr, int hw_addr_len,
				     struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
{
	return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
		MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
				     u16 esw_owner_vhca_id)
{
	return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
		(vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

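/*
 * Illustrative example of the two helpers above: on an ECPF eswitch
 * manager (the SmartNIC case), mlx5_eswitch_manager_vport() returns
 * MLX5_VPORT_ECPF and mlx5_eswitch_first_host_vport_num() returns
 * MLX5_VPORT_PF; otherwise they return MLX5_VPORT_PF and
 * MLX5_VPORT_FIRST_VF respectively.
 */
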
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

static inline bool mlx5_esw_is_fdb_created(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.flags & MLX5_ESW_FDB_CREATED;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies a SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after vports are initialized in mlx5_eswitch_init.
 * Borrowed the idea from xa_for_each_marked() but with support for desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)

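/*
 * Minimal usage sketch for the iterators above (hypothetical caller;
 * assumes esw->vports was already populated by mlx5_eswitch_init()):
 *
 *	struct mlx5_vport *vport;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
 *		esw_debug(esw->dev, "VF vport 0x%x\n", vport->vport);
 */
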
/* This macro should only be used if EC SRIOV is enabled.
 *
 * Because there were no more marks available on the xarray this uses a
 * for_each_range approach. The range is only valid when EC SRIOV is enabled
 */
#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last)		\
	xa_for_each_range(&((esw)->vports),				\
			  index,					\
			  vport,					\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base),	\
			  MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
			  (last) - 1)

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					 u32 *flow_group_in,
					 int match_params);

void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
				   u16 vport,
				   struct mlx5_flow_spec *spec);

int mlx5_esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int
mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					     struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);

int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev);
void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err);
void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev);

static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->esw_funcs.num_vfs;

	return 0;
}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
{
	if (mlx5_esw_allowed(esw))
		return esw->num_peers;
	return 0;
}

static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
	return esw->fdb_table.offloads.slow_fdb;
}

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw, u64 key) {}
static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
static inline bool mlx5_esw_offloads_devcom_is_ready(struct mlx5_eswitch *esw) { return false; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw, int max_slaves)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}

static inline bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev)
{
	return true;
}

static inline void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev)
{
}

static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; }

static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {}

static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {}

static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */