/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
#include "sf/sf.h"
#include "en/tc_ct.h"
#include "en/tc/sample.h"

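/* Objects of these types are stored in the offloads reg_c0 object pool (see
 * struct mlx5_esw_offload below): either a restored chain id or per-sample
 * metadata (group id, rate, truncation size and tunnel id).
 */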
enum mlx5_mapped_obj_type {
	MLX5_MAPPED_OBJ_CHAIN,
	MLX5_MAPPED_OBJ_SAMPLE,
};

struct mlx5_mapped_obj {
	enum mlx5_mapped_obj_type type;
	union {
		u32 chain;
		struct {
			u32 group_id;
			u32 rate;
			u32 trunc_size;
			u32 tunnel_id;
		} sample;
	};
};

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

#define esw_chains(esw) \
	((esw)->fdb_table.offloads.esw_chains_priv)

enum {
	MAPPING_TYPE_CHAIN,
	MAPPING_TYPE_TUNNEL,
	MAPPING_TYPE_TUNNEL_ENC_OPTS,
	MAPPING_TYPE_LABELS,
	MAPPING_TYPE_ZONE,
};

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group for the default match-all FTE that tags ingress
		 * packets with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle  *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
			struct mlx5_flow_handle *bounce_rule;
			struct mlx5_flow_group *bounce_grp;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8                      mac[ETH_ALEN];
	u16                     vlan;
	u64                     node_guid;
	int                     link_state;
	u8                      qos;
	u8                      spoofchk: 1;
	u8                      trusted: 1;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

struct mlx5_vport {
	struct mlx5_core_dev    *dev;
	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct      vport_change_handler;

	struct vport_ingress    ingress;
	struct vport_egress     egress;
	u32                     default_metadata;
	u32                     metadata;

	struct mlx5_vport_info  info;

	struct {
		bool            enabled;
		u32             esw_tsar_ix;
		u32             bw_share;
		u32 min_rate;
		u32 max_rate;
		struct mlx5_esw_rate_group *group;
	} qos;

	u16 vport;
	bool                    enabled;
	enum mlx5_eswitch_vport_event enabled_events;
	int index;
	struct devlink_port *dl_port;
};

struct mlx5_esw_indir_table;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *tc_miss_table;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *send_to_vport_meta_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle **send_to_vport_meta_rules;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_fs_chains *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

			struct mlx5_esw_indir_table *indir;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;
	struct mapping_ctx *reg_c0_obj_pool;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct xarray vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	struct xarray vhca_map;
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
	unsigned int host_number; /* ECPF supports one external host */
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

struct mlx5_host_work {
	struct work_struct	work;
	struct mlx5_eswitch	*esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb		nb;
	u16			num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_esw_bridge_offloads;

struct mlx5_eswitch {
	struct mlx5_core_dev    *dev;
	struct mlx5_nb          nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct xarray vports;
	u32 flags;
	int                     total_vports;
	int                     enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex            state_lock;

	/* Protects eswitch mode changes that occur via one or more
	 * user commands, e.g. SRIOV state changes or devlink commands.
	 */
	struct rw_semaphore mode_lock;
	atomic64_t user_count;

	struct {
		bool            enabled;
		u32             root_tsar_ix;
		struct mlx5_esw_rate_group *group0;
		struct list_head groups; /* Protected by esw->state_lock */
	} qos;

	struct mlx5_esw_bridge_offloads *br_offloads;
	struct mlx5_esw_offload offloads;
	int                     mode;
	u16                     manager_vport;
	u16                     first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32             large_group_num;
	}  params;
	struct blocking_notifier_head n_head;
	struct lock_class_key mode_lock_key;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    struct mlx5_esw_rate_group *group,
				    struct netlink_ext_ack *extack);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP         = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID   = BIT(1),
	MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE  = BIT(2),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED  = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH     = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT    = BIT(2),
	MLX5_ESW_ATTR_FLAG_SRC_REWRITE   = BIT(3),
	MLX5_ESW_ATTR_FLAG_SAMPLE        = BIT(4),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev	*in_mdev;
	struct mlx5_core_dev    *counter_dev;

	int split_count;
	int out_count;

	__be16	vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16	vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8	vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8	total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
		int src_port_rewrite_act_id;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_rx_tun_attr *rx_tun_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack);
int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

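/* VLAN pop/push actions of the requested depth are usable only when the FDB
 * exposes the pop_vlan/push_vlan capabilities, and additionally the
 * pop_vlan_2/push_vlan_2 capabilities when more than one VLAN header is
 * involved.
 */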
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return  ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...)			\
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...)			\
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

static inline bool mlx5_esw_allowed(const struct mlx5_eswitch *esw)
{
	return esw && MLX5_ESWITCH_MANAGER(esw->dev);
}

/* The returned number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev);
}

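/* The devlink port index packs the device vhca_id into the upper 16 bits and
 * the vport number into the lower 16 bits, e.g. vhca_id 0x2 and vport 0x5
 * yield port index 0x00020005.
 */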
static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
}

static inline u16
mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
{
	return dl_port_index & 0xffff;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* Each mark identifies an eswitch vport type.
 * MLX5_ESW_VPT_HOST_FN is used to identify both PF and VF ports using
 * a single mark.
 * MLX5_ESW_VPT_VF identifies an SRIOV VF vport.
 * MLX5_ESW_VPT_SF identifies an SF vport.
 */
#define MLX5_ESW_VPT_HOST_FN XA_MARK_0
#define MLX5_ESW_VPT_VF XA_MARK_1
#define MLX5_ESW_VPT_SF XA_MARK_2

/* The vport iterators below are valid only after the vports are initialized
 * in mlx5_eswitch_init(). The idea is borrowed from xa_for_each_marked(), with
 * added support for a desired last element; a usage sketch follows the macros.
 */

#define mlx5_esw_for_each_vport(esw, index, vport) \
	xa_for_each(&((esw)->vports), index, vport)

#define mlx5_esw_for_each_entry_marked(xa, index, entry, last, filter)	\
	for (index = 0, entry = xa_find(xa, &index, last, filter); \
	     entry; entry = xa_find_after(xa, &index, last, filter))

#define mlx5_esw_for_each_vport_marked(esw, index, vport, last, filter)	\
	mlx5_esw_for_each_entry_marked(&((esw)->vports), index, vport, last, filter)

#define mlx5_esw_for_each_vf_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_VF)

#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last)	\
	mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
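/* Illustrative usage sketch (hypothetical caller code, not taken from the
 * driver): walk all VF vports up to a caller-chosen bound.
 *
 *	unsigned long i;
 *	struct mlx5_vport *vport;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, last_vf_index)
 *		handle_vf_vport(esw, vport);
 *
 * last_vf_index and handle_vf_vport() are placeholders.
 */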

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num);
bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events);
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

struct esw_vport_tbl_namespace {
	int max_fte;
	int max_num_groups;
	u32 flags;
};

struct mlx5_vport_tbl_attr {
	u32 chain;
	u16 prio;
	u16 vport;
	const struct esw_vport_tbl_namespace *vport_ns;
};

struct mlx5_flow_table *
mlx5_esw_vporttbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
void
mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr);
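/* Illustrative usage sketch (hypothetical caller code, not taken from the
 * driver): acquire a per-vport FDB table, use it, then release it with the
 * same attributes.
 *
 *	struct mlx5_vport_tbl_attr attr = {
 *		.chain = chain,
 *		.prio = prio,
 *		.vport = vport_num,
 *		.vport_ns = &my_vport_ns,
 *	};
 *	struct mlx5_flow_table *fdb;
 *
 *	fdb = mlx5_esw_vporttbl_get(esw, &attr);
 *	...
 *	mlx5_esw_vporttbl_put(esw, &attr);
 *
 * my_vport_ns stands for a caller-defined struct esw_vport_tbl_namespace.
 */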

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum);
void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id);

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);

/**
 * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
 *
 * @new_mode: New mode of eswitch.
 */
struct mlx5_esw_event_info {
	u16 new_mode;
};

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
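/* Sketch of a mode-change listener (hypothetical, for illustration only). It
 * assumes the notifier data pointer carries a struct mlx5_esw_event_info.
 *
 *	static int my_esw_mode_event(struct notifier_block *nb,
 *				     unsigned long event, void *data)
 *	{
 *		struct mlx5_esw_event_info *info = data;
 *
 *		...react to info->new_mode...
 *		return NOTIFY_OK;
 *	}
 *
 * The callback is hooked up via a struct notifier_block passed to
 * mlx5_esw_event_notifier_register().
 */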

bool mlx5_esw_hold(struct mlx5_core_dev *dev);
void mlx5_esw_release(struct mlx5_core_dev *dev);
void mlx5_esw_get(struct mlx5_core_dev *dev);
void mlx5_esw_put(struct mlx5_core_dev *dev);
int mlx5_esw_try_lock(struct mlx5_eswitch *esw);
void mlx5_esw_unlock(struct mlx5_eswitch *esw);
void mlx5_esw_lock(struct mlx5_eswitch *esw);

void esw_vport_change_handle_locked(struct mlx5_vport *vport);

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw);
void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; }
static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; }

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline unsigned int
mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
				     u16 vport_num)
{
	return vport_num;
}

static inline int
mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					struct mlx5_eswitch *slave_esw)
{
	return 0;
}

static inline void
mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					 struct mlx5_eswitch *slave_esw) {}

static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	return 0;
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */