/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
	MLX5_MAPPED_OBJ_INT_PORT_METADATA,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
		u32 int_port_metadata;
	};
};
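/* Objects of this type are interned into esw->offloads.reg_c0_obj_pool;
 * the mapping id returned for an object is written into reg_c0 of the
 * packet so the object can be recovered after a table miss.
 * Illustrative sketch only, assuming the lib/mapping.h API:
 *
 *	struct mlx5_mapped_obj obj = {};
 *	u32 id;
 *
 *	obj.type = MLX5_MAPPED_OBJ_CHAIN;
 *	obj.chain = chain;
 *	err = mapping_add(esw->offloads.reg_c0_obj_pool, &obj, &id);
 */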
#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
	MAPPING_TYPE_INT_PORT,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add a default match-all FTE to tag ingress
		 * packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct mlx5_flow_handle *bounce_rule;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u64 node_guid;
	int link_state;
	u8 qos;
	u8 spoofchk: 1;
	u8 trusted: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
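/* These are bit flags: callers combine them to select which vport context
 * events a vport is armed for. Illustrative sketch only:
 *
 *	mlx5_esw_vport_enable(esw, vport_num,
 *			      MLX5_VPORT_UC_ADDR_CHANGE |
 *			      MLX5_VPORT_MC_ADDR_CHANGE |
 *			      MLX5_VPORT_PROMISC_CHANGE);
 */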
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct xarray vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_esw_bridge_offloads;

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		bool enabled;
		u32 root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int mode;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
	struct blocking_notifier_head n_head;
	struct lock_class_key mode_lock_key;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
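/* Typical lifecycle (simplified, illustrative only):
 *
 *	err = mlx5_eswitch_init(dev);
 *	...
 *	err = mlx5_eswitch_enable(esw, num_vfs);
 *	...
 *	mlx5_eswitch_disable(esw, true);	(clear_vf resets VF config)
 *	mlx5_eswitch_cleanup(esw);
 */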
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);
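/* Add/del pairing sketch (illustrative only): a rule returned by
 * mlx5_eswitch_add_offloaded_rule() is released with
 * mlx5_eswitch_del_offloaded_rule(), passing the same attr:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */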
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 32

enum {
	MLX5_ESW_DEST_ENCAP	= BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID	= BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE	= BIT(2),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED	= BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH	= BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT	= BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE	= BIT(3),
	MLX5_ESW_ATTR_FLAG_SAMPLE	= BIT(4),
	MLX5_ESW_ATTR_FLAG_ACCEPT	= BIT(5),
};

/* Returns true if any of the flags that require skipping further TC/NF
 * processing are set.
 */
static inline bool
mlx5_esw_attr_flags_skip(u32 attr_flags)
{
	return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
}

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;
	struct mlx5e_tc_int_port *dest_int_port;
	struct mlx5e_tc_int_port *int_port;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
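/* Usage sketch (illustrative only): gate a double-tagged (QinQ) VLAN
 * push/pop offload on FW capability for depth 2:
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */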
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)	\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)	\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)	\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}
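/* The devlink port index encodes the vhca_id in the high 16 bits and the
 * vport number in the low 16 bits, so the two helpers above round-trip
 * the vport number. Illustrative only:
 *
 *	idx = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
 *	WARN_ON(mlx5_esw_devlink_port_index_to_vport_num(idx) != vport_num);
 */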
/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators are valid only after the vports are initialized in
 * mlx5_eswitch_init. Borrowed the idea from xa_for_each_marked() but with
 * support for a desired last element.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
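/* Usage sketch (illustrative only): walk the VF vports up to a last
 * vport index:
 *
 *	unsigned long index;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vf_vport(esw, index, vport, esw->esw_funcs.num_vfs) {
 *		... per-VF handling ...
 *	}
 */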
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * struct mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
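/* Usage sketch (illustrative only; the callback and notifier_block names
 * below are made up):
 *
 *	static int my_esw_event(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		... react to info->new_mode ...
 *		return NOTIFY_OK;
 *	}
 *
 *	err = mlx5_esw_event_notifier_register(esw, &my_nb);
 */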
bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void mlx5_esw_lock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) {}
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) {}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					struct mlx5_eswitch *slave_esw)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */