1 /* 2 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

/* Max unicast L2 addresses a single vport may own, from device caps */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

/* Max multicast L2 addresses a single vport may own, from device caps */
#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

/* Convert a rate to a TSAR bw_share value, clamped to
 * [MLX5_MIN_BW_SHARE, limit].
 */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

/* Per-vport ingress ACL flow table and the groups/rules installed in it.
 * The legacy and offloads sub-structs hold the objects used by the
 * respective eswitch modes.
 */
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

/* Per-vport egress ACL flow table and the groups/rules installed in it */
struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

/* Administrative (configured) state of a vport.
 * NOTE(review): presumably mirrors the ndo_set_vf_* / devlink settings
 * applied through the mlx5_eswitch_set_vport_* API below — confirm.
 */
struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u8                      qos;
	u64                     node_guid;
	int                     link_state;
	u32                     min_rate;
	u32                     max_rate;
	bool                    spoofchk;
	bool                    trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

/* Run-time state the E-Switch keeps for each vport */
struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	int                     vport;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
	} qos;

	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	struct devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

/* FDB state; the union reflects that legacy and offloads modes are
 * mutually exclusive.
 */
struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

/* State used only while the eswitch is in offloads (switchdev) mode */
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

/* Top-level E-Switch instance, one per mlx5 core device */
struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport       *vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct mutex mode_lock;

	struct {
		bool            enabled;
		u32             root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	/* Chain notified on eswitch mode change (see mlx5_esw_event_info) */
	struct blocking_notifier_head n_head;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int
mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum
mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE   = BIT(3),
};

/* E-Switch specific attributes of an offloaded flow */
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	/* Per-destination attributes, up to MLX5_MAX_FLOW_FWD_VPORTS */
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int
mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
				       struct devlink_port *port,
				       u8 *hw_addr, int *hw_addr_len,
				       struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

/* True when the device can pop and push VLAN headers up to the
 * requested depth (1 or 2 tags), per the FDB flow-table caps.
 */
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is eswitch manager.
 */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

/* First array index used by SF vports; SFs follow the PF and VF entries */
static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
{
	/* PF and VF vports indices start from 0 to max_vfs */
	return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
{
	return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
}

/* Map an SF vport number to its index in the vports/reps arrays */
static inline int
mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num - mlx5_sf_start_function_id(esw->dev) +
	       MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
}

/* Inverse of mlx5_esw_sf_vport_num_to_index() */
static inline u16
mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
{
	return mlx5_sf_start_function_id(esw->dev) + idx -
	       (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
}

static inline bool
mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_sf_supported(esw->dev) &&
	       vport_num >= mlx5_sf_start_function_id(esw->dev) &&
	       (vport_num < (mlx5_sf_start_function_id(esw->dev) +
			     mlx5_sf_max_functions(esw->dev)));
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink always occupies the last element of the vports array. */
	return esw->total_vports - 1;
}

/* ECPF sits just before the uplink at the end of the vports array */
static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	if (mlx5_esw_is_sf_vport(esw, vport_num))
		return mlx5_esw_sf_vport_num_to_index(esw, vport_num);

	/* PF and VF vports start from 0 to max_vfs */
	return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	/* SF vports indices are after VFs and before ECPF */
	if (mlx5_sf_supported(esw->dev) &&
	    index > mlx5_core_max_vfs(esw->dev))
		return mlx5_esw_sf_vport_index_to_num(esw, index);

	/* PF and VF vports start from 0 to max_vfs */
	return index;
}

/* Devlink port index encodes the vhca id in the high 16 bits and the
 * vport number in the low 16 bits.
 */
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)		\
	for ((i) = MLX5_VPORT_PF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
	for ((i) = (esw)->total_vports - 1;		\
	     (vport) = &(esw)->vports[i],		\
	     (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[(i)],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
	for ((i) = (nvfs);					\
	     (vport) = &(esw)->vports[(i)],			\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager.
 */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
	for ((i) = (esw)->first_host_vport;			\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);						\
	     (rep) = &(esw)->offloads.vport_reps[i],			\
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
	for ((vport) = (esw)->first_host_vport;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);						\
	     (vport) >= (esw)->first_host_vport; (vport)--)

#define mlx5_esw_for_each_sf_rep(esw, i, rep)		\
	for ((i) = mlx5_esw_sf_start_idx(esw);		\
	     (rep) = &(esw)->offloads.vport_reps[(i)],	\
	     (i) < mlx5_esw_sf_end_idx(esw); (i++))

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct
mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int  mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */