/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2019, Mellanox Technologies */

#ifndef	_DR_TYPES_
#define	_DR_TYPES_

#include <linux/mlx5/driver.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
#include "lib/mlx5.h"
#include "mlx5_ifc_dr.h"
#include "mlx5dr.h"

#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
#define DR_NUM_OF_FLEX_PARSERS 8
#define DR_STE_MAX_FLEX_0_ID 3
#define DR_STE_MAX_FLEX_1_ID 7

#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
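
/* Usage sketch for the logging wrappers above: each macro forwards its
 * printf-style arguments to the matching mlx5_core_* helper using the
 * domain's mdev, e.g.:
 *
 *	mlx5dr_err(dmn, "Failed to allocate STE hash table\n");
 *	mlx5dr_dbg(dmn, "Rule uses %d STEs\n", num_stes);
 *
 * (num_stes is a hypothetical local used only for illustration.)
 */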

static inline bool dr_is_flex_parser_0_id(u8 parser_id)
{
	return parser_id <= DR_STE_MAX_FLEX_0_ID;
}

static inline bool dr_is_flex_parser_1_id(u8 parser_id)
{
	return parser_id > DR_STE_MAX_FLEX_0_ID;
}
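
/* A minimal sketch of how the two helpers above are meant to be used when
 * choosing between the flex parser 0 and flex parser 1 STE builders
 * (declared later in this file); the surrounding variables are
 * illustrative only:
 *
 *	if (dr_is_flex_parser_0_id(parser_id))
 *		mlx5dr_ste_build_flex_parser_0(ste_ctx, sb, mask, inner, rx);
 *	else
 *		mlx5dr_ste_build_flex_parser_1(ste_ctx, sb, mask, inner, rx);
 */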

enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_1,
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
	DR_CHUNK_SIZE_2,
	DR_CHUNK_SIZE_4,
	DR_CHUNK_SIZE_8,
	DR_CHUNK_SIZE_16,
	DR_CHUNK_SIZE_32,
	DR_CHUNK_SIZE_64,
	DR_CHUNK_SIZE_128,
	DR_CHUNK_SIZE_256,
	DR_CHUNK_SIZE_512,
	DR_CHUNK_SIZE_1K,
	DR_CHUNK_SIZE_2K,
	DR_CHUNK_SIZE_4K,
	DR_CHUNK_SIZE_8K,
	DR_CHUNK_SIZE_16K,
	DR_CHUNK_SIZE_32K,
	DR_CHUNK_SIZE_64K,
	DR_CHUNK_SIZE_128K,
	DR_CHUNK_SIZE_256K,
	DR_CHUNK_SIZE_512K,
	DR_CHUNK_SIZE_1024K,
	DR_CHUNK_SIZE_2048K,
	DR_CHUNK_SIZE_MAX,
};

enum mlx5dr_icm_type {
	DR_ICM_TYPE_STE,
	DR_ICM_TYPE_MODIFY_ACTION,
};

static inline enum mlx5dr_icm_chunk_size
mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
{
	chunk += 2;
	if (chunk < DR_CHUNK_SIZE_MAX)
		return chunk;

	return DR_CHUNK_SIZE_MAX;
}
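
/* The chunk size enum value is the log2 of the number of entries (see
 * mlx5dr_icm_pool_chunk_size_to_entries() below), so stepping the enum by
 * two means each "higher" chunk holds four times as many entries.  Worked
 * example:
 *
 *	chunk = mlx5dr_icm_next_higher_chunk(DR_CHUNK_SIZE_16);
 *	// chunk == DR_CHUNK_SIZE_64: 2^4 = 16 entries -> 2^6 = 64 entries
 *
 * Once the step would pass DR_CHUNK_SIZE_MAX, DR_CHUNK_SIZE_MAX is returned.
 */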

enum {
	DR_STE_SIZE = 64,
	DR_STE_SIZE_CTRL = 32,
	DR_STE_SIZE_TAG = 16,
	DR_STE_SIZE_MASK = 16,
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};

enum mlx5dr_ste_ctx_action_cap {
	DR_STE_CTX_ACTION_CAP_NONE = 0,
	DR_STE_CTX_ACTION_CAP_TX_POP   = 1 << 0,
	DR_STE_CTX_ACTION_CAP_RX_PUSH  = 1 << 1,
	DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
};

enum {
	DR_MODIFY_ACTION_SIZE = 8,
};

enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
	DR_MATCHER_CRITERIA_MAX = 1 << 6,
};

enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_DROP,
	DR_ACTION_TYP_QP,
	DR_ACTION_TYP_FT,
	DR_ACTION_TYP_CTR,
	DR_ACTION_TYP_TAG,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_VPORT,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
	DR_ACTION_TYP_INSERT_HDR,
	DR_ACTION_TYP_REMOVE_HDR,
	DR_ACTION_TYP_SAMPLER,
	DR_ACTION_TYP_MAX,
};

enum mlx5dr_ipv {
	DR_RULE_IPV4,
	DR_RULE_IPV6,
	DR_RULE_IPV_MAX,
};

struct mlx5dr_icm_pool;
struct mlx5dr_icm_chunk;
struct mlx5dr_icm_buddy_mem;
struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_rule_rx_tx;
struct mlx5dr_matcher_rx_tx;
struct mlx5dr_ste_ctx;

struct mlx5dr_ste {
	u8 *hw_ste;
	/* refcount: indicates the number of rules that are using this STE */
	u32 refcount;

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* this STE is a member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	struct mlx5dr_ste_htbl *next_htbl;

	/* The rule this STE belongs to */
	struct mlx5dr_rule_rx_tx *rule_rx_tx;

	/* this STE is part of a rule, located in the rule's chain of STEs */
	u8 ste_chain_location;
};

struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non-collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collision entries attached to this table */
	unsigned int num_of_collisions;
};

struct mlx5dr_ste_htbl {
	u16 lu_type;
	u16 byte_mask;
	u32 refcount;
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste *ste_arr;
	u8 *hw_ste_arr;

	struct list_head *miss_list;

	enum mlx5dr_icm_chunk_size chunk_size;
	struct mlx5dr_ste *pointing_ste;

	struct mlx5dr_ste_htbl_ctrl ctrl;
};

struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list;
	u16 size;
	u16 offset;
	u8 data_cont[DR_STE_SIZE];
	u8 *data;
};

void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
					       u16 offset, u8 *data,
					       struct mlx5dr_ste_send_info *ste_info,
					       struct list_head *send_list,
					       bool copy_data);

struct mlx5dr_ste_build {
	u8 inner:1;
	u8 rx:1;
	u8 vhca_id_valid:1;
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_caps *caps;
	u16 lu_type;
	u16 byte_mask;
	u8 bit_mask[DR_STE_SIZE_MASK];
	int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
				  struct mlx5dr_ste_build *sb,
				  u8 *tag);
};

struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
		      enum mlx5dr_icm_chunk_size chunk_size,
		      u16 lu_type, u16 byte_mask);

int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);

static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
{
	htbl->refcount--;
	if (!htbl->refcount)
		mlx5dr_ste_htbl_free(htbl);
}

static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
	htbl->refcount++;
}
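
/* A minimal usage sketch for the refcount helpers above, assuming the
 * table's refcount starts at zero after allocation:
 *
 *	htbl = mlx5dr_ste_htbl_alloc(pool, DR_CHUNK_SIZE_1, lu_type, byte_mask);
 *	if (!htbl)
 *		return -ENOMEM;
 *	mlx5dr_htbl_get(htbl);	// reference taken by the new user
 *	...
 *	mlx5dr_htbl_put(htbl);	// last put calls mlx5dr_ste_htbl_free()
 */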

/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste, u64 miss_addr);
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);

#define MLX5DR_MAX_VLANS 2

struct mlx5dr_ste_actions_attr {
	u32	modify_index;
	u16	modify_actions;
	u32	decap_index;
	u16	decap_actions;
	u8	decap_with_vlan:1;
	u64	final_icm_addr;
	u32	flow_tag;
	u32	ctr_id;
	u16	gvmi;
	u16	hit_gvmi;
	struct {
		u32	id;
		u32	size;
		u8	param_0;
		u8	param_1;
	} reformat;
	struct {
		int	count;
		u32	headers[MLX5DR_MAX_VLANS];
	} vlans;
};

void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *last_ste,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes);
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *last_ste,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes);

void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data);
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data);
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter);
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data,
					u32 data_sz,
					u8 *hw_action,
					u32 hw_action_sz,
					u16 *used_hw_action_num);

const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);

struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher);
static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
				  struct mlx5dr_matcher *matcher,
				  struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	ste->refcount--;
	if (!ste->refcount)
		mlx5dr_ste_free(ste, matcher, nic_matcher);
}

/* initialized to 0; increased only when this STE appears in a new rule */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
	ste->refcount++;
}

static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
{
	return !ste->refcount;
}
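
/* Minimal sketch of the intended pairing, assuming an STE whose refcount
 * starts at zero: mlx5dr_ste_get() is called once per rule that starts
 * using the STE, mlx5dr_ste_put() once per rule that stops using it, and
 * mlx5dr_ste_is_not_used() tells whether the entry is currently free:
 *
 *	if (mlx5dr_ste_is_not_used(ste))
 *		; // entry is free and may be taken by a new rule
 *	mlx5dr_ste_get(ste);
 *	...
 *	mlx5dr_ste_put(ste, matcher, nic_matcher); // frees the STE at zero
 */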

bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size);

/* STE build functions */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value);
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr);
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *builder,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx);
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx);
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx);
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx);
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx);
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx);
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx);
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx);
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx);
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx);
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);

/* Actions utils */
int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
				 struct mlx5dr_matcher_rx_tx *nic_matcher,
				 struct mlx5dr_action *actions[],
				 u32 num_actions,
				 u8 *ste_arr,
				 u32 *new_hw_ste_arr_sz);

struct mlx5dr_match_spec {
	u32 smac_47_16;		/* Source MAC address of incoming packet */
	/* Incoming packet Ethertype - this is the Ethertype
	 * following the last VLAN tag of the packet
	 */
	u32 ethertype:16;
	u32 smac_15_0:16;	/* Source MAC address of incoming packet */
	u32 dmac_47_16;		/* Destination MAC address of incoming packet */
	/* VLAN ID of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_vid:12;
	/* CFI bit of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_cfi:1;
	/* Priority of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_prio:3;
	u32 dmac_15_0:16;	/* Destination MAC address of incoming packet */
	/* TCP flags. Bit 0: FIN, Bit 1: SYN, Bit 2: RST, Bit 3: PSH,
	 * Bit 4: ACK, Bit 5: URG, Bit 6: ECE, Bit 7: CWR, Bit 8: NS
	 */
	u32 tcp_flags:9;
	u32 ip_version:4;	/* IP version */
	u32 frag:1;		/* Packet is an IP fragment */
	/* The first vlan in the packet is s-vlan (0x88a8).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 svlan_tag:1;
	/* The first vlan in the packet is c-vlan (0x8100).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 cvlan_tag:1;
	/* Explicit Congestion Notification derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_ecn:2;
	/* Differentiated Services Code Point derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_dscp:6;
	u32 ip_protocol:8;	/* IP protocol */
	/* TCP destination port.
	 * tcp and udp sport/dport are mutually exclusive
	 */
	u32 tcp_dport:16;
	/* TCP source port. tcp and udp sport/dport are mutually exclusive */
	u32 tcp_sport:16;
	u32 ttl_hoplimit:8;
	u32 reserved:24;
	/* UDP destination port. tcp and udp sport/dport are mutually exclusive */
	u32 udp_dport:16;
	/* UDP source port. tcp and udp sport/dport are mutually exclusive */
	u32 udp_sport:16;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_127_96;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_95_64;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_63_32;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_31_0;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_127_96;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_95_64;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_63_32;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_31_0;
};

struct mlx5dr_match_misc {
	u32 source_sqn:24;		/* Source SQN */
	u32 source_vhca_port:4;
	/* used with GRE, sequence number exists when gre_s_present == 1 */
	u32 gre_s_present:1;
	/* used with GRE, key exists when gre_k_present == 1 */
	u32 gre_k_present:1;
	u32 reserved_auto1:1;
	/* used with GRE, checksum exists when gre_c_present == 1 */
	u32 gre_c_present:1;
	/* Source port. 0xffff indicates the wire port */
	u32 source_port:16;
	u32 source_eswitch_owner_vhca_id:16;
	/* VLAN ID of the second VLAN tag in the inner header of the incoming
	 * packet. Valid only when inner_second_cvlan_tag == 1 or
	 * inner_second_svlan_tag == 1
	 */
	u32 inner_second_vid:12;
	/* CFI bit of the second VLAN tag in the inner header of the incoming
	 * packet. Valid only when inner_second_cvlan_tag == 1 or
	 * inner_second_svlan_tag == 1
	 */
	u32 inner_second_cfi:1;
	/* Priority of the second VLAN tag in the inner header of the incoming
	 * packet. Valid only when inner_second_cvlan_tag == 1 or
	 * inner_second_svlan_tag == 1
	 */
	u32 inner_second_prio:3;
	/* VLAN ID of the second VLAN tag in the outer header of the incoming
	 * packet. Valid only when outer_second_cvlan_tag == 1 or
	 * outer_second_svlan_tag == 1
	 */
	u32 outer_second_vid:12;
	/* CFI bit of the second VLAN tag in the outer header of the incoming
	 * packet. Valid only when outer_second_cvlan_tag == 1 or
	 * outer_second_svlan_tag == 1
	 */
	u32 outer_second_cfi:1;
	/* Priority of the second VLAN tag in the outer header of the incoming
	 * packet. Valid only when outer_second_cvlan_tag == 1 or
	 * outer_second_svlan_tag == 1
	 */
	u32 outer_second_prio:3;
	u32 gre_protocol:16;		/* GRE Protocol (outer) */
	u32 reserved_auto3:12;
	/* The second vlan in the inner header of the packet is s-vlan (0x88a8).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_svlan_tag:1;
	/* The second vlan in the outer header of the packet is s-vlan (0x88a8).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_svlan_tag:1;
	/* The second vlan in the inner header of the packet is c-vlan (0x8100).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_cvlan_tag:1;
	/* The second vlan in the outer header of the packet is c-vlan (0x8100).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_cvlan_tag:1;
	u32 gre_key_l:8;		/* GRE Key [7:0] (outer) */
	u32 gre_key_h:24;		/* GRE Key [31:8] (outer) */
	u32 reserved_auto4:8;
	u32 vxlan_vni:24;		/* VXLAN VNI (outer) */
	u32 geneve_oam:1;		/* GENEVE OAM field (outer) */
	u32 reserved_auto5:7;
	u32 geneve_vni:24;		/* GENEVE VNI field (outer) */
	u32 outer_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (outer) */
	u32 reserved_auto6:12;
	u32 inner_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (inner) */
	u32 reserved_auto7:12;
	u32 geneve_protocol_type:16;	/* GENEVE protocol type (outer) */
	u32 geneve_opt_len:6;		/* GENEVE OptLen (outer) */
	u32 reserved_auto8:10;
	u32 bth_dst_qp:24;		/* Destination QP in BTH header */
	u32 reserved_auto9:8;
	u8 reserved_auto10[20];
};

struct mlx5dr_match_misc2 {
	u32 outer_first_mpls_ttl:8;		/* First MPLS TTL (outer) */
	u32 outer_first_mpls_s_bos:1;		/* First MPLS S_BOS (outer) */
	u32 outer_first_mpls_exp:3;		/* First MPLS EXP (outer) */
	u32 outer_first_mpls_label:20;		/* First MPLS LABEL (outer) */
	u32 inner_first_mpls_ttl:8;		/* First MPLS TTL (inner) */
	u32 inner_first_mpls_s_bos:1;		/* First MPLS S_BOS (inner) */
	u32 inner_first_mpls_exp:3;		/* First MPLS EXP (inner) */
	u32 inner_first_mpls_label:20;		/* First MPLS LABEL (inner) */
	u32 outer_first_mpls_over_gre_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_gre_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_gre_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_gre_label:20;	/* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_udp_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_udp_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_udp_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_udp_label:20;	/* last MPLS LABEL (outer) */
	u32 metadata_reg_c_7;			/* metadata_reg_c_7 */
	u32 metadata_reg_c_6;			/* metadata_reg_c_6 */
	u32 metadata_reg_c_5;			/* metadata_reg_c_5 */
	u32 metadata_reg_c_4;			/* metadata_reg_c_4 */
	u32 metadata_reg_c_3;			/* metadata_reg_c_3 */
	u32 metadata_reg_c_2;			/* metadata_reg_c_2 */
	u32 metadata_reg_c_1;			/* metadata_reg_c_1 */
	u32 metadata_reg_c_0;			/* metadata_reg_c_0 */
	u32 metadata_reg_a;			/* metadata_reg_a */
	u8 reserved_auto2[12];
};

struct mlx5dr_match_misc3 {
	u32 inner_tcp_seq_num;
	u32 outer_tcp_seq_num;
	u32 inner_tcp_ack_num;
	u32 outer_tcp_ack_num;
	u32 outer_vxlan_gpe_vni:24;
	u32 reserved_auto1:8;
	u32 reserved_auto2:16;
	u32 outer_vxlan_gpe_flags:8;
	u32 outer_vxlan_gpe_next_protocol:8;
	u32 icmpv4_header_data;
	u32 icmpv6_header_data;
	u8 icmpv6_code;
	u8 icmpv6_type;
	u8 icmpv4_code;
	u8 icmpv4_type;
	u32 geneve_tlv_option_0_data;
	u8 gtpu_msg_flags;
	u8 gtpu_msg_type;
	u32 gtpu_teid;
	u32 gtpu_dw_2;
	u32 gtpu_first_ext_dw_0;
	u32 gtpu_dw_0;
};

struct mlx5dr_match_misc4 {
	u32 prog_sample_field_value_0;
	u32 prog_sample_field_id_0;
	u32 prog_sample_field_value_1;
	u32 prog_sample_field_id_1;
	u32 prog_sample_field_value_2;
	u32 prog_sample_field_id_2;
	u32 prog_sample_field_value_3;
	u32 prog_sample_field_id_3;
};

struct mlx5dr_match_param {
	struct mlx5dr_match_spec outer;
	struct mlx5dr_match_misc misc;
	struct mlx5dr_match_spec inner;
	struct mlx5dr_match_misc2 misc2;
	struct mlx5dr_match_misc3 misc3;
	struct mlx5dr_match_misc4 misc4;
};

#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
				       (_misc3)->icmpv4_code || \
				       (_misc3)->icmpv4_header_data)
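
/* A minimal sketch of how DR_MASK_IS_ICMPV4_SET() can be used when picking
 * the ICMP flavor during builder selection; the surrounding logic is
 * illustrative only:
 *
 *	if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
 *		; // mask matches on the ICMPv4 type/code/header data fields
 *	else
 *		; // fall back to the ICMPv6 fields of misc3
 */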

struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
	u8 sw_owner:1;
	u8 sw_owner_v2:1;
};

struct mlx5dr_cmd_vport_cap {
	u16 vport_gvmi;
	u16 vhca_gvmi;
	u64 icm_address_rx;
	u64 icm_address_tx;
	u32 num;
};

struct mlx5dr_roce_cap {
	u8 roce_en:1;
	u8 fl_rc_qp_when_roce_disabled:1;
	u8 fl_rc_qp_when_roce_enabled:1;
};

struct mlx5dr_cmd_caps {
	u16 gvmi;
	u64 nic_rx_drop_address;
	u64 nic_tx_drop_address;
	u64 nic_tx_allow_address;
	u64 esw_rx_drop_address;
	u64 esw_tx_drop_address;
	u32 log_icm_size;
	u64 hdr_modify_icm_addr;
	u32 flex_protocols;
	u8 flex_parser_id_icmp_dw0;
	u8 flex_parser_id_icmp_dw1;
	u8 flex_parser_id_icmpv6_dw0;
	u8 flex_parser_id_icmpv6_dw1;
	u8 flex_parser_id_geneve_tlv_option_0;
	u8 flex_parser_id_mpls_over_gre;
	u8 flex_parser_id_mpls_over_udp;
	u8 flex_parser_id_gtpu_dw_0;
	u8 flex_parser_id_gtpu_teid;
	u8 flex_parser_id_gtpu_dw_2;
	u8 flex_parser_id_gtpu_first_ext_dw_0;
	u8 max_ft_level;
	u16 roce_min_src_udp;
	u8 num_esw_ports;
	u8 sw_format_ver;
	bool eswitch_manager;
	bool rx_sw_owner;
	bool tx_sw_owner;
	bool fdb_sw_owner;
	u8 rx_sw_owner_v2:1;
	u8 tx_sw_owner_v2:1;
	u8 fdb_sw_owner_v2:1;
	u32 num_vports;
	struct mlx5dr_esw_caps esw_caps;
	struct mlx5dr_cmd_vport_cap *vports_caps;
	bool prio_tag_required;
	struct mlx5dr_roce_cap roce_caps;
	u8 isolate_vl_tc:1;
};

enum mlx5dr_domain_nic_type {
	DR_DOMAIN_NIC_TYPE_RX,
	DR_DOMAIN_NIC_TYPE_TX,
};

struct mlx5dr_domain_rx_tx {
	u64 drop_icm_addr;
	u64 default_icm_addr;
	enum mlx5dr_domain_nic_type type;
	struct mutex mutex; /* protect rx/tx domain */
};

struct mlx5dr_domain_info {
	bool supp_sw_steering;
	u32 max_inline_size;
	u32 max_send_wr;
	u32 max_log_sw_icm_sz;
	u32 max_log_action_icm_sz;
	struct mlx5dr_domain_rx_tx rx;
	struct mlx5dr_domain_rx_tx tx;
	struct mlx5dr_cmd_caps caps;
};

struct mlx5dr_domain_cache {
	struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
};

struct mlx5dr_domain {
	struct mlx5dr_domain *peer_dmn;
	struct mlx5_core_dev *mdev;
	u32 pdn;
	struct mlx5_uars_page *uar;
	enum mlx5dr_domain_type type;
	refcount_t refcount;
	struct mlx5dr_icm_pool *ste_icm_pool;
	struct mlx5dr_icm_pool *action_icm_pool;
	struct mlx5dr_send_ring *send_ring;
	struct mlx5dr_domain_info info;
	struct mlx5dr_domain_cache cache;
	struct mlx5dr_ste_ctx *ste_ctx;
};

struct mlx5dr_table_rx_tx {
	struct mlx5dr_ste_htbl *s_anchor;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u64 default_icm_addr;
};

struct mlx5dr_table {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_table_rx_tx rx;
	struct mlx5dr_table_rx_tx tx;
	u32 level;
	u32 table_type;
	u32 table_id;
	u32 flags;
	struct list_head matcher_list;
	struct mlx5dr_action *miss_action;
	refcount_t refcount;
};

struct mlx5dr_matcher_rx_tx {
	struct mlx5dr_ste_htbl *s_htbl;
	struct mlx5dr_ste_htbl *e_anchor;
	struct mlx5dr_ste_build *ste_builder;
	struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
					       [DR_RULE_IPV_MAX]
					       [DR_RULE_MAX_STES];
	u8 num_of_builders;
	u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
	u64 default_icm_addr;
	struct mlx5dr_table_rx_tx *nic_tbl;
};

struct mlx5dr_matcher {
	struct mlx5dr_table *tbl;
	struct mlx5dr_matcher_rx_tx rx;
	struct mlx5dr_matcher_rx_tx tx;
	struct list_head matcher_list;
	u32 prio;
	struct mlx5dr_match_param mask;
	u8 match_criteria;
	refcount_t refcount;
	struct mlx5dv_flow_matcher *dv_matcher;
};

struct mlx5dr_ste_action_modify_field {
	u16 hw_field;
	u8 start;
	u8 end;
	u8 l3_type;
	u8 l4_type;
};

struct mlx5dr_action_rewrite {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_icm_chunk *chunk;
	u8 *data;
	u16 num_of_actions;
	u32 index;
	u8 allow_rx:1;
	u8 allow_tx:1;
	u8 modify_ttl:1;
};

struct mlx5dr_action_reformat {
	struct mlx5dr_domain *dmn;
	u32 id;
	u32 size;
	u8 param_0;
	u8 param_1;
};

struct mlx5dr_action_sampler {
	struct mlx5dr_domain *dmn;
	u64 rx_icm_addr;
	u64 tx_icm_addr;
	u32 sampler_id;
};

struct mlx5dr_action_dest_tbl {
	u8 is_fw_tbl:1;
	union {
		struct mlx5dr_table *tbl;
		struct {
			struct mlx5dr_domain *dmn;
			u32 id;
			u32 group_id;
			enum fs_flow_table_type type;
			u64 rx_icm_addr;
			u64 tx_icm_addr;
			struct mlx5dr_action **ref_actions;
			u32 num_of_ref_actions;
		} fw_tbl;
	};
};

struct mlx5dr_action_ctr {
	u32 ctr_id;
	u32 offset;
};

struct mlx5dr_action_vport {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_vport_cap *caps;
};

struct mlx5dr_action_push_vlan {
	u32 vlan_hdr; /* tpid_pcp_dei_vid */
};

struct mlx5dr_action_flow_tag {
	u32 flow_tag;
};

struct mlx5dr_action {
	enum mlx5dr_action_type action_type;
	refcount_t refcount;

	union {
		void *data;
		struct mlx5dr_action_rewrite *rewrite;
		struct mlx5dr_action_reformat *reformat;
		struct mlx5dr_action_sampler *sampler;
		struct mlx5dr_action_dest_tbl *dest_tbl;
		struct mlx5dr_action_ctr *ctr;
		struct mlx5dr_action_vport *vport;
		struct mlx5dr_action_push_vlan *push_vlan;
		struct mlx5dr_action_flow_tag *flow_tag;
	};
};

enum mlx5dr_connect_type {
	CONNECT_HIT	= 1,
	CONNECT_MISS	= 2,
};

struct mlx5dr_htbl_connect_info {
	enum mlx5dr_connect_type type;
	union {
		struct mlx5dr_ste_htbl *hit_next_htbl;
		u64 miss_icm_addr;
	};
};

struct mlx5dr_rule_rx_tx {
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste *last_rule_ste;
};

struct mlx5dr_rule {
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule_rx_tx rx;
	struct mlx5dr_rule_rx_tx tx;
	struct list_head rule_actions_list;
	u32 flow_source;
};

void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force);
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes);

struct mlx5dr_icm_chunk {
	struct mlx5dr_icm_buddy_mem *buddy_mem;
	struct list_head chunk_list;
	u32 rkey;
	u32 num_of_entries;
	u32 byte_size;
	u64 icm_addr;
	u64 mr_addr;

	/* indicates the index of this chunk in the whole memory,
	 * used for deleting the chunk from the buddy
	 */
	unsigned int seg;

	/* Memory optimisation */
	struct mlx5dr_ste *ste_arr;
	u8 *hw_ste_arr;
	struct list_head *miss_list;
};

static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
	mutex_lock(&nic_dmn->mutex);
}

static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
{
	mutex_unlock(&nic_dmn->mutex);
}

static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
{
	mlx5dr_domain_nic_lock(&dmn->info.rx);
	mlx5dr_domain_nic_lock(&dmn->info.tx);
}

static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
{
	mlx5dr_domain_nic_unlock(&dmn->info.tx);
	mlx5dr_domain_nic_unlock(&dmn->info.rx);
}
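
/* Lock ordering note: mlx5dr_domain_lock() always takes the RX side before
 * the TX side, and mlx5dr_domain_unlock() releases them in reverse order.
 * A minimal usage sketch for a caller that must hold the whole domain:
 *
 *	mlx5dr_domain_lock(dmn);
 *	... // modify both RX and TX state of the domain
 *	mlx5dr_domain_unlock(dmn);
 *
 * Callers touching a single direction can use the per-NIC-domain variants
 * instead.
 */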

int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
				   struct mlx5dr_matcher_rx_tx *nic_matcher,
				   enum mlx5dr_ipv outer_ipv,
				   enum mlx5dr_ipv inner_ipv);

static inline int
mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
{
	if (icm_type == DR_ICM_TYPE_STE)
		return DR_STE_SIZE;

	return DR_MODIFY_ACTION_SIZE;
}

static inline u32
mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
{
	return 1 << chunk_size;
}

static inline int
mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
				   enum mlx5dr_icm_type icm_type)
{
	int num_of_entries;
	int entry_size;

	entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);

	return entry_size * num_of_entries;
}
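
/* Worked example for the helpers above: an STE chunk of DR_CHUNK_SIZE_4
 * holds 1 << 2 = 4 entries of DR_STE_SIZE (64) bytes each:
 *
 *	mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_4); // 4
 *	mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_4,
 *					   DR_ICM_TYPE_STE);     // 4 * 64 = 256
 */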

static inline int
mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
{
	int num_of_entries =
		mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);

	/* Threshold is 50%; the extra one keeps the threshold at 1 for a
	 * table of size 1
	 */
	return (num_of_entries + 1) / 2;
}
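
/* Worked example: a hash table of DR_CHUNK_SIZE_16 holds 16 entries, so
 * mlx5dr_ste_htbl_increase_threshold() returns (16 + 1) / 2 = 8 valid
 * entries as the 50% grow threshold.
 */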

static inline bool
mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
		return false;

	return true;
}

static inline struct mlx5dr_cmd_vport_cap *
mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
{
	if (!caps->vports_caps ||
	    (vport >= caps->num_vports && vport != WIRE_PORT))
		return NULL;

	if (vport == WIRE_PORT)
		vport = caps->num_vports;

	return &caps->vports_caps[vport];
}
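
/* A minimal sketch of mlx5dr_get_vport_cap(): regular vport numbers index
 * directly into vports_caps[], while WIRE_PORT (0xFFFF) maps to the last
 * slot at index num_vports; dmn is an illustrative domain pointer:
 *
 *	vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, WIRE_PORT);
 *	if (!vport_cap)
 *		return -EINVAL; // no vport caps available
 */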

struct mlx5dr_cmd_query_flow_table_details {
	u8 status;
	u8 level;
	u64 sw_owner_icm_root_1;
	u64 sw_owner_icm_root_0;
};

struct mlx5dr_cmd_create_flow_table_attr {
	u32 table_type;
	u64 icm_addr_rx;
	u64 icm_addr_tx;
	u8 level;
	bool sw_owner;
	bool term_tbl;
	bool decap_en;
	bool reformat_en;
};

/* internal API functions */
int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
			    struct mlx5dr_cmd_caps *caps);
int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
				       bool other_vport, u16 vport_number,
				       u64 *icm_address_rx,
				       u64 *icm_address_tx);
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
			  bool other_vport, u16 vport_number, u16 *gvmi);
int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
			      struct mlx5dr_esw_caps *caps);
int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
				  u32 sampler_id,
				  u64 *rx_icm_addr,
				  u64 *tx_icm_addr);
int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
					u32 table_type,
					u32 table_id,
					u32 group_id,
					u32 modify_header_id,
					u32 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
				    u32 table_type,
				    u32 table_id);
int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
				   u32 table_type,
				   u8 num_of_actions,
				   u64 *actions,
				   u32 *modify_header_id);
int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
				     u32 modify_header_id);
int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
				       u32 table_type,
				       u32 table_id,
				       u32 *group_id);
int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
				  u32 table_type,
				  u32 table_id,
				  u32 group_id);
int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
				 struct mlx5dr_cmd_create_flow_table_attr *attr,
				 u64 *fdb_rx_icm_addr,
				 u32 *table_id);
int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
				  u32 table_id,
				  u32 table_type);
int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
				enum fs_flow_table_type type,
				u32 table_id,
				struct mlx5dr_cmd_query_flow_table_details *output);
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
				   enum mlx5_reformat_ctx_type rt,
				   u8 reformat_param_0,
				   u8 reformat_param_1,
				   size_t reformat_size,
				   void *reformat_data,
				   u32 *reformat_id);
void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
				     u32 reformat_id);

struct mlx5dr_cmd_gid_attr {
	u8 gid[16];
	u8 mac[6];
	u32 roce_ver;
};

struct mlx5dr_cmd_qp_create_attr {
	u32 page_id;
	u32 pdn;
	u32 cqn;
	u32 pm_state;
	u32 service_type;
	u32 buff_umem_id;
	u32 db_umem_id;
	u32 sq_wqe_cnt;
	u32 rq_wqe_cnt;
	u32 rq_wqe_shift;
	u8 isolate_vl_tc:1;
};

int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr);

struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);

void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste);
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask);

struct mlx5dr_qp {
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_qp wq;
	struct mlx5_uars_page *uar;
	struct mlx5_wq_ctrl wq_ctrl;
	u32 qpn;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int *wqe_head;
		unsigned int wqe_cnt;
	} sq;
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int wqe_cnt;
	} rq;
	int max_inline_data;
};

struct mlx5dr_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	struct mlx5dr_qp *qp;
};

struct mlx5dr_mr {
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey mkey;
	dma_addr_t dma_addr;
	void *addr;
	size_t size;
};

#define MAX_SEND_CQE		64
#define MIN_READ_SYNC		64

struct mlx5dr_send_ring {
	struct mlx5dr_cq *cq;
	struct mlx5dr_qp *qp;
	struct mlx5dr_mr *mr;
	/* How many WQEs are waiting for completion */
	u32 pending_wqe;
	/* A signaled completion is requested once per this threshold of WQEs */
	u16 signal_th;
	/* Each post send is limited to max_post_send_size bytes */
	u32 max_post_send_size;
	/* manage the send queue */
	u32 tx_head;
	void *buf;
	u32 buf_size;
	u8 sync_buff[MIN_READ_SYNC];
	struct mlx5dr_mr *sync_mr;
	spinlock_t lock; /* Protect the data path of the send ring */
	bool err_state; /* send_ring is not usable in err state */
};

int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring);
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
			     struct mlx5dr_ste *ste,
			     u8 *data,
			     u16 size,
			     u16 offset);
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask);
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste);
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action);

struct mlx5dr_cmd_ft_info {
	u32 id;
	u16 vport;
	enum fs_flow_table_type type;
};

struct mlx5dr_cmd_flow_destination_hw_info {
	enum mlx5_flow_destination_type type;
	union {
		u32 tir_num;
		u32 ft_num;
		u32 ft_id;
		u32 counter_id;
		u32 sampler_id;
		struct {
			u16 num;
			u16 vhca_id;
			u32 reformat_id;
			u8 flags;
		} vport;
	};
};

struct mlx5dr_cmd_fte_info {
	u32 dests_size;
	u32 index;
	struct mlx5_flow_context flow_context;
	u32 *val;
	struct mlx5_flow_act action;
	struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
	bool ignore_flow_level;
};

int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
		       int opmod, int modify_mask,
		       struct mlx5dr_cmd_ft_info *ft,
		       u32 group_id,
		       struct mlx5dr_cmd_fte_info *fte);

bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);

struct mlx5dr_fw_recalc_cs_ft {
	u64 rx_icm_addr;
	u32 table_id;
	u32 group_id;
	u32 modify_hdr_id;
};

struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
			    struct mlx5dr_cmd_flow_destination_hw_info *dest,
			    int num_dest,
			    bool reformat_req,
			    u32 *tbl_id,
			    u32 *group_id,
			    bool ignore_flow_level);
void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
			      u32 group_id);
#endif  /* _DR_TYPES_ */