1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /* Copyright (c) 2019, Mellanox Technologies */
3 
4 #ifndef	_DR_TYPES_
5 #define	_DR_TYPES_
6 
7 #include <linux/mlx5/driver.h>
8 #include <linux/refcount.h>
9 #include "fs_core.h"
10 #include "wq.h"
11 #include "lib/mlx5.h"
12 #include "mlx5_ifc_dr.h"
13 #include "mlx5dr.h"
14 
15 #define DR_RULE_MAX_STES 17
16 #define DR_ACTION_MAX_STES 5
17 #define WIRE_PORT 0xFFFF
18 #define DR_STE_SVLAN 0x1
19 #define DR_STE_CVLAN 0x2
20 
21 #define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
22 #define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
23 #define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
24 
/* ICM chunk sizes: each enum value is the log2 of the number of entries
 * in a chunk (see mlx5dr_icm_pool_chunk_size_to_entries()).
 */
enum mlx5dr_icm_chunk_size {
	DR_CHUNK_SIZE_1,
	DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
	DR_CHUNK_SIZE_2,
	DR_CHUNK_SIZE_4,
	DR_CHUNK_SIZE_8,
	DR_CHUNK_SIZE_16,
	DR_CHUNK_SIZE_32,
	DR_CHUNK_SIZE_64,
	DR_CHUNK_SIZE_128,
	DR_CHUNK_SIZE_256,
	DR_CHUNK_SIZE_512,
	DR_CHUNK_SIZE_1K,
	DR_CHUNK_SIZE_2K,
	DR_CHUNK_SIZE_4K,
	DR_CHUNK_SIZE_8K,
	DR_CHUNK_SIZE_16K,
	DR_CHUNK_SIZE_32K,
	DR_CHUNK_SIZE_64K,
	DR_CHUNK_SIZE_128K,
	DR_CHUNK_SIZE_256K,
	DR_CHUNK_SIZE_512K,
	DR_CHUNK_SIZE_1024K,
	DR_CHUNK_SIZE_2048K,
	DR_CHUNK_SIZE_MAX,
};
51 
/* The two ICM memory types: STE entries vs modify-header actions
 * (selects the per-entry size in mlx5dr_icm_pool_chunk_size_to_byte()).
 */
enum mlx5dr_icm_type {
	DR_ICM_TYPE_STE,
	DR_ICM_TYPE_MODIFY_ACTION,
};
56 
57 static inline enum mlx5dr_icm_chunk_size
58 mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
59 {
60 	chunk += 2;
61 	if (chunk < DR_CHUNK_SIZE_MAX)
62 		return chunk;
63 
64 	return DR_CHUNK_SIZE_MAX;
65 }
66 
/* STE layout sizes in bytes: a full STE, its control portion, the match
 * tag, and the match mask (DR_STE_SIZE = CTRL + TAG + MASK).
 */
enum {
	DR_STE_SIZE = 64,
	DR_STE_SIZE_CTRL = 32,
	DR_STE_SIZE_TAG = 16,
	DR_STE_SIZE_MASK = 16,
};

/* STE size without the trailing mask portion */
enum {
	DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};

/* Size in bytes of a single modify-header action entry in ICM */
enum {
	DR_MODIFY_ACTION_SIZE = 8,
};
81 
/* Bitmask of which mlx5dr_match_param sections a matcher uses
 * (outer/inner headers and the misc parameter blocks).
 */
enum mlx5dr_matcher_criteria {
	DR_MATCHER_CRITERIA_EMPTY = 0,
	DR_MATCHER_CRITERIA_OUTER = 1 << 0,
	DR_MATCHER_CRITERIA_MISC = 1 << 1,
	DR_MATCHER_CRITERIA_INNER = 1 << 2,
	DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
	DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
	DR_MATCHER_CRITERIA_MAX = 1 << 5,
};
91 
/* SW-steering action kinds (discriminator for the union in
 * struct mlx5dr_action).
 */
enum mlx5dr_action_type {
	DR_ACTION_TYP_TNL_L2_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L2,
	DR_ACTION_TYP_TNL_L3_TO_L2,
	DR_ACTION_TYP_L2_TO_TNL_L3,
	DR_ACTION_TYP_DROP,
	DR_ACTION_TYP_QP,
	DR_ACTION_TYP_FT,
	DR_ACTION_TYP_CTR,
	DR_ACTION_TYP_TAG,
	DR_ACTION_TYP_MODIFY_HDR,
	DR_ACTION_TYP_VPORT,
	DR_ACTION_TYP_POP_VLAN,
	DR_ACTION_TYP_PUSH_VLAN,
	DR_ACTION_TYP_MAX,
};
108 
109 struct mlx5dr_icm_pool;
110 struct mlx5dr_icm_chunk;
111 struct mlx5dr_icm_bucket;
112 struct mlx5dr_ste_htbl;
113 struct mlx5dr_match_param;
114 struct mlx5dr_cmd_caps;
115 struct mlx5dr_matcher_rx_tx;
116 
/* A single steering table entry (STE) and its SW bookkeeping state */
struct mlx5dr_ste {
	/* shadow copy of the HW STE; presumably points into the owning
	 * htbl's hw_ste_arr — TODO confirm against allocation code
	 */
	u8 *hw_ste;
	/* refcount: indicates the num of rules that using this ste */
	refcount_t refcount;

	/* attached to the miss_list head at each htbl entry */
	struct list_head miss_list_node;

	/* each rule member that uses this ste attached here */
	struct list_head rule_list;

	/* this ste is member of htbl */
	struct mlx5dr_ste_htbl *htbl;

	/* next hash table this entry chains to, if any */
	struct mlx5dr_ste_htbl *next_htbl;

	/* this ste is part of a rule, located in ste's chain */
	u8 ste_chain_location;
};
136 
/* Occupancy statistics used to decide when a hash table should grow */
struct mlx5dr_ste_htbl_ctrl {
	/* total number of valid entries belonging to this hash table. This
	 * includes the non collision and collision entries
	 */
	unsigned int num_of_valid_entries;

	/* total number of collisions entries attached to this table */
	unsigned int num_of_collisions;
	/* occupancy level above which may_grow is considered */
	unsigned int increase_threshold;
	/* set when the table is allowed to be resized upward */
	u8 may_grow:1;
};
148 
/* Hash table of STEs backed by an ICM chunk.
 * Freed via mlx5dr_htbl_put() when refcount drops to zero.
 */
struct mlx5dr_ste_htbl {
	u8 lu_type;
	u16 byte_mask;
	refcount_t refcount;
	/* ICM memory backing this table */
	struct mlx5dr_icm_chunk *chunk;
	/* SW bookkeeping entries, one per STE slot */
	struct mlx5dr_ste *ste_arr;
	/* shadow copy of the HW STE contents */
	u8 *hw_ste_arr;

	/* per-slot miss (collision) list heads */
	struct list_head *miss_list;

	enum mlx5dr_icm_chunk_size chunk_size;
	struct mlx5dr_ste *pointing_ste;

	struct mlx5dr_ste_htbl_ctrl ctrl;
};
164 
/* Pending STE write queued for posting to HW; when data is copied
 * (see mlx5dr_send_fill_and_append_ste_send_info()) it lives in data_cont,
 * otherwise data points at caller-owned memory.
 */
struct mlx5dr_ste_send_info {
	struct mlx5dr_ste *ste;
	struct list_head send_list;
	u16 size;
	u16 offset;
	u8 data_cont[DR_STE_SIZE];
	u8 *data;
};
173 
174 void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
175 					       u16 offset, u8 *data,
176 					       struct mlx5dr_ste_send_info *ste_info,
177 					       struct list_head *send_list,
178 					       bool copy_data);
179 
/* One step of a matcher's STE-building pipeline: its mask plus the
 * callback that encodes the match value into a HW STE tag.
 */
struct mlx5dr_ste_build {
	u8 inner:1;		/* build against inner headers */
	u8 rx:1;		/* RX (vs TX) direction */
	u8 vhca_id_valid:1;
	struct mlx5dr_domain *dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 lu_type;
	u16 byte_mask;
	u8 bit_mask[DR_STE_SIZE_MASK];
	/* encodes @spec into the STE tag at @hw_ste_p; returns 0 or errno */
	int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
				  struct mlx5dr_ste_build *sb,
				  u8 *hw_ste_p);
};
193 
194 struct mlx5dr_ste_htbl *
195 mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
196 		      enum mlx5dr_icm_chunk_size chunk_size,
197 		      u8 lu_type, u16 byte_mask);
198 
199 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
200 
201 static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
202 {
203 	if (refcount_dec_and_test(&htbl->refcount))
204 		mlx5dr_ste_htbl_free(htbl);
205 }
206 
/* Take a reference on @htbl; paired with mlx5dr_htbl_put() */
static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
{
	refcount_inc(&htbl->refcount);
}
211 
212 /* STE utils */
213 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
214 void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
215 void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
216 				struct mlx5dr_ste_htbl *next_htbl);
217 void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
218 u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
219 void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
220 void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
221 void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
222 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
223 bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
224 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
225 				u8 ste_location);
226 void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
227 void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
228 void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
229 			     int size, bool encap_l3);
230 void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
231 void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
232 void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
233 void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
234 				 bool go_back);
235 void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
236 u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
237 void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
238 				    u32 re_write_index);
239 void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
240 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
241 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
242 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
243 
244 void mlx5dr_ste_free(struct mlx5dr_ste *ste,
245 		     struct mlx5dr_matcher *matcher,
246 		     struct mlx5dr_matcher_rx_tx *nic_matcher);
247 static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
248 				  struct mlx5dr_matcher *matcher,
249 				  struct mlx5dr_matcher_rx_tx *nic_matcher)
250 {
251 	if (refcount_dec_and_test(&ste->refcount))
252 		mlx5dr_ste_free(ste, matcher, nic_matcher);
253 }
254 
/* initial as 0, increased only when ste appears in a new rule;
 * paired with mlx5dr_ste_put()
 */
static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
{
	refcount_inc(&ste->refcount);
}
260 
261 void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
262 					  struct mlx5dr_ste_htbl *next_htbl);
263 bool mlx5dr_ste_equal_tag(void *src, void *dst);
264 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
265 				struct mlx5dr_matcher_rx_tx *nic_matcher,
266 				struct mlx5dr_ste *ste,
267 				u8 *cur_hw_ste,
268 				enum mlx5dr_icm_chunk_size log_table_size);
269 
270 /* STE build functions */
271 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
272 			       u8 match_criteria,
273 			       struct mlx5dr_match_param *mask,
274 			       struct mlx5dr_match_param *value);
275 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
276 			     struct mlx5dr_matcher_rx_tx *nic_matcher,
277 			     struct mlx5dr_match_param *value,
278 			     u8 *ste_arr);
279 int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
280 				    struct mlx5dr_match_param *mask,
281 				    bool inner, bool rx);
282 void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
283 					  struct mlx5dr_match_param *mask,
284 					  bool inner, bool rx);
285 void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
286 				       struct mlx5dr_match_param *mask,
287 				       bool inner, bool rx);
288 void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
289 				      struct mlx5dr_match_param *mask,
290 				      bool inner, bool rx);
291 void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
292 				      struct mlx5dr_match_param *mask,
293 				      bool inner, bool rx);
294 void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
295 				 struct mlx5dr_match_param *mask,
296 				 bool inner, bool rx);
297 void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
298 				 struct mlx5dr_match_param *mask,
299 				 bool inner, bool rx);
300 void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
301 				 struct mlx5dr_match_param *mask,
302 				 bool inner, bool rx);
303 void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
304 				 struct mlx5dr_match_param *mask,
305 				 bool inner, bool rx);
306 void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
307 				  struct mlx5dr_match_param *mask,
308 				  bool inner, bool rx);
309 void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
310 			  struct mlx5dr_match_param *mask,
311 			  bool inner, bool rx);
312 void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
313 			   struct mlx5dr_match_param *mask,
314 			   bool inner, bool rx);
315 void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
316 				    struct mlx5dr_match_param *mask,
317 				    bool inner, bool rx);
318 int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
319 				   struct mlx5dr_match_param *mask,
320 				   struct mlx5dr_cmd_caps *caps,
321 				   bool inner, bool rx);
322 void mlx5dr_ste_build_flex_parser_tnl(struct mlx5dr_ste_build *sb,
323 				      struct mlx5dr_match_param *mask,
324 				      bool inner, bool rx);
325 void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
326 				      struct mlx5dr_match_param *mask,
327 				      bool inner, bool rx);
328 void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
329 				 struct mlx5dr_match_param *mask,
330 				 bool inner, bool rx);
331 void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
332 				 struct mlx5dr_match_param *mask,
333 				 bool inner, bool rx);
334 int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
335 				  struct mlx5dr_match_param *mask,
336 				  struct mlx5dr_domain *dmn,
337 				  bool inner, bool rx);
338 void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
339 
340 /* Actions utils */
341 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
342 				 struct mlx5dr_matcher_rx_tx *nic_matcher,
343 				 struct mlx5dr_action *actions[],
344 				 u32 num_actions,
345 				 u8 *ste_arr,
346 				 u32 *new_hw_ste_arr_sz);
347 
/* Packet header match fields (used for both outer and inner headers,
 * see struct mlx5dr_match_param).
 */
struct mlx5dr_match_spec {
	u32 smac_47_16;		/* Source MAC address of incoming packet */
	/* Incoming packet Ethertype - this is the Ethertype
	 * following the last VLAN tag of the packet
	 */
	u32 ethertype:16;
	u32 smac_15_0:16;	/* Source MAC address of incoming packet */
	u32 dmac_47_16;		/* Destination MAC address of incoming packet */
	/* VLAN ID of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_vid:12;
	/* CFI bit of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_cfi:1;
	/* Priority of first VLAN tag in the incoming packet.
	 * Valid only when cvlan_tag==1 or svlan_tag==1
	 */
	u32 first_prio:3;
	u32 dmac_15_0:16;	/* Destination MAC address of incoming packet */
	/* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
	 *             Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
	 */
	u32 tcp_flags:9;
	u32 ip_version:4;	/* IP version */
	u32 frag:1;		/* Packet is an IP fragment */
	/* The first vlan in the packet is s-vlan (0x88a8).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 svlan_tag:1;
	/* The first vlan in the packet is c-vlan (0x8100).
	 * cvlan_tag and svlan_tag cannot be set together
	 */
	u32 cvlan_tag:1;
	/* Explicit Congestion Notification derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_ecn:2;
	/* Differentiated Services Code Point derived from
	 * Traffic Class/TOS field of IPv6/v4
	 */
	u32 ip_dscp:6;
	u32 ip_protocol:8;	/* IP protocol */
	/* TCP destination port.
	 * tcp and udp sport/dport are mutually exclusive
	 */
	u32 tcp_dport:16;
	/* TCP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 tcp_sport:16;
	u32 ttl_hoplimit:8;
	u32 reserved:24;
	/* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_dport:16;
	/* UDP source port.;tcp and udp sport/dport are mutually exclusive */
	u32 udp_sport:16;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_127_96;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_95_64;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_63_32;
	/* IPv6 source address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 src_ip_31_0;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_127_96;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_95_64;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_63_32;
	/* IPv6 destination address of incoming packets
	 * For IPv4 address use bits 31:0 (rest of the bits are reserved)
	 * This field should be qualified by an appropriate ethertype
	 */
	u32 dst_ip_31_0;
};
445 
/* Miscellaneous match fields: source port/SQN, second VLAN tags,
 * GRE/VXLAN/GENEVE tunnel headers, IPv6 flow labels and BTH.
 */
struct mlx5dr_match_misc {
	u32 source_sqn:24;		/* Source SQN */
	u32 source_vhca_port:4;
	/* used with GRE, sequence number exist when gre_s_present == 1 */
	u32 gre_s_present:1;
	/* used with GRE, key exist when gre_k_present == 1 */
	u32 gre_k_present:1;
	u32 reserved_auto1:1;
	/* used with GRE, checksum exist when gre_c_present == 1 */
	u32 gre_c_present:1;
	/* Source port.;0xffff determines wire port */
	u32 source_port:16;
	u32 source_eswitch_owner_vhca_id:16;
	/* VLAN ID of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_vid:12;
	/* CFI bit of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_cfi:1;
	/* Priority of second VLAN tag in the inner header of the incoming packet.
	 * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
	 */
	u32 inner_second_prio:3;
	/* VLAN ID of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_vid:12;
	/* CFI bit of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_cfi:1;
	/* Priority of second VLAN tag in the outer header of the incoming packet.
	 * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
	 */
	u32 outer_second_prio:3;
	u32 gre_protocol:16;		/* GRE Protocol (outer) */
	u32 reserved_auto3:12;
	/* The second vlan in the inner header of the packet is s-vlan (0x88a8).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_svlan_tag:1;
	/* The second vlan in the outer header of the packet is s-vlan (0x88a8).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_svlan_tag:1;
	/* The second vlan in the inner header of the packet is c-vlan (0x8100).
	 * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
	 */
	u32 inner_second_cvlan_tag:1;
	/* The second vlan in the outer header of the packet is c-vlan (0x8100).
	 * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
	 */
	u32 outer_second_cvlan_tag:1;
	u32 gre_key_l:8;		/* GRE Key [7:0] (outer) */
	u32 gre_key_h:24;		/* GRE Key[31:8] (outer) */
	u32 reserved_auto4:8;
	u32 vxlan_vni:24;		/* VXLAN VNI (outer) */
	u32 geneve_oam:1;		/* GENEVE OAM field (outer) */
	u32 reserved_auto5:7;
	u32 geneve_vni:24;		/* GENEVE VNI field (outer) */
	u32 outer_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (outer) */
	u32 reserved_auto6:12;
	u32 inner_ipv6_flow_label:20;	/* Flow label of incoming IPv6 packet (inner) */
	u32 reserved_auto7:12;
	u32 geneve_protocol_type:16;	/* GENEVE protocol type (outer) */
	u32 geneve_opt_len:6;		/* GENEVE OptLen (outer) */
	u32 reserved_auto8:10;
	u32 bth_dst_qp:24;		/* Destination QP in BTH header */
	u32 reserved_auto9:8;
	u8 reserved_auto10[20];
};
519 
/* Second misc block: MPLS labels (plain, over-GRE, over-UDP) and the
 * metadata registers (reg_c_0..7, reg_a, reg_b).
 */
struct mlx5dr_match_misc2 {
	u32 outer_first_mpls_ttl:8;		/* First MPLS TTL (outer) */
	u32 outer_first_mpls_s_bos:1;		/* First MPLS S_BOS (outer) */
	u32 outer_first_mpls_exp:3;		/* First MPLS EXP (outer) */
	u32 outer_first_mpls_label:20;		/* First MPLS LABEL (outer) */
	u32 inner_first_mpls_ttl:8;		/* First MPLS TTL (inner) */
	u32 inner_first_mpls_s_bos:1;		/* First MPLS S_BOS (inner) */
	u32 inner_first_mpls_exp:3;		/* First MPLS EXP (inner) */
	u32 inner_first_mpls_label:20;		/* First MPLS LABEL (inner) */
	u32 outer_first_mpls_over_gre_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_gre_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_gre_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_gre_label:20;	/* last MPLS LABEL (outer) */
	u32 outer_first_mpls_over_udp_ttl:8;	/* last MPLS TTL (outer) */
	u32 outer_first_mpls_over_udp_s_bos:1;	/* last MPLS S_BOS (outer) */
	u32 outer_first_mpls_over_udp_exp:3;	/* last MPLS EXP (outer) */
	u32 outer_first_mpls_over_udp_label:20;	/* last MPLS LABEL (outer) */
	u32 metadata_reg_c_7;			/* metadata_reg_c_7 */
	u32 metadata_reg_c_6;			/* metadata_reg_c_6 */
	u32 metadata_reg_c_5;			/* metadata_reg_c_5 */
	u32 metadata_reg_c_4;			/* metadata_reg_c_4 */
	u32 metadata_reg_c_3;			/* metadata_reg_c_3 */
	u32 metadata_reg_c_2;			/* metadata_reg_c_2 */
	u32 metadata_reg_c_1;			/* metadata_reg_c_1 */
	u32 metadata_reg_c_0;			/* metadata_reg_c_0 */
	u32 metadata_reg_a;			/* metadata_reg_a */
	u32 metadata_reg_b;			/* metadata_reg_b */
	u8 reserved_auto2[8];
};
549 
/* Third misc block: TCP seq/ack numbers, VXLAN-GPE fields and
 * ICMPv4/ICMPv6 headers (matched via the flex parser).
 */
struct mlx5dr_match_misc3 {
	u32 inner_tcp_seq_num;
	u32 outer_tcp_seq_num;
	u32 inner_tcp_ack_num;
	u32 outer_tcp_ack_num;
	u32 outer_vxlan_gpe_vni:24;
	u32 reserved_auto1:8;
	u32 reserved_auto2:16;
	u32 outer_vxlan_gpe_flags:8;
	u32 outer_vxlan_gpe_next_protocol:8;
	u32 icmpv4_header_data;
	u32 icmpv6_header_data;
	u32 icmpv6_code:8;
	u32 icmpv6_type:8;
	u32 icmpv4_code:8;
	u32 icmpv4_type:8;
	u8 reserved_auto3[0x1c];
};
568 
/* Full match parameter set; which sections are meaningful is selected
 * by the enum mlx5dr_matcher_criteria bitmask.
 */
struct mlx5dr_match_param {
	struct mlx5dr_match_spec outer;
	struct mlx5dr_match_misc misc;
	struct mlx5dr_match_spec inner;
	struct mlx5dr_match_misc2 misc2;
	struct mlx5dr_match_misc3 misc3;
};
576 
/* True when the misc3 mask matches on any ICMPv4 field */
#define DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
						   (_misc3)->icmpv4_code || \
						   (_misc3)->icmpv4_header_data)
580 
/* E-switch steering capabilities (see mlx5dr_cmd_query_esw_caps()) */
struct mlx5dr_esw_caps {
	u64 drop_icm_address_rx;
	u64 drop_icm_address_tx;
	u64 uplink_icm_address_rx;
	u64 uplink_icm_address_tx;
	bool sw_owner;		/* FDB supports SW-managed steering */
};
588 
/* Per-vport steering capabilities; looked up via mlx5dr_get_vport_cap() */
struct mlx5dr_cmd_vport_cap {
	u16 vport_gvmi;
	u16 vhca_gvmi;
	u64 icm_address_rx;
	u64 icm_address_tx;
	u32 num;		/* vport number */
};
596 
/* Device capabilities relevant to SW steering; filled by
 * mlx5dr_cmd_query_device() and related query helpers.
 */
struct mlx5dr_cmd_caps {
	u16 gvmi;
	u64 nic_rx_drop_address;
	u64 nic_tx_drop_address;
	u64 nic_tx_allow_address;
	u64 esw_rx_drop_address;
	u64 esw_tx_drop_address;
	u32 log_icm_size;
	u64 hdr_modify_icm_addr;
	/* bitmask of supported flex parser protocols, e.g.
	 * MLX5_FLEX_PARSER_ICMP_V4_ENABLED
	 */
	u32 flex_protocols;
	u8 flex_parser_id_icmp_dw0;
	u8 flex_parser_id_icmp_dw1;
	u8 flex_parser_id_icmpv6_dw0;
	u8 flex_parser_id_icmpv6_dw1;
	u8 max_ft_level;
	u16 roce_min_src_udp;
	u8 num_esw_ports;
	bool eswitch_manager;
	bool rx_sw_owner;
	bool tx_sw_owner;
	bool fdb_sw_owner;
	u32 num_vports;
	struct mlx5dr_esw_caps esw_caps;
	/* array indexed by vport number; entry num_vports is the wire port
	 * (see mlx5dr_get_vport_cap())
	 */
	struct mlx5dr_cmd_vport_cap *vports_caps;
	bool prio_tag_required;
};
623 
/* Per-direction (RX or TX) domain state */
struct mlx5dr_domain_rx_tx {
	u64 drop_icm_addr;
	u64 default_icm_addr;
	enum mlx5dr_ste_entry_type ste_type;
};
629 
/* Static domain information gathered at domain creation */
struct mlx5dr_domain_info {
	bool supp_sw_steering;	/* device supports SW steering for this domain */
	u32 max_inline_size;
	u32 max_send_wr;
	u32 max_log_sw_icm_sz;
	u32 max_log_action_icm_sz;
	struct mlx5dr_domain_rx_tx rx;
	struct mlx5dr_domain_rx_tx tx;
	struct mlx5dr_cmd_caps caps;
};
640 
/* Per-domain cache of FW objects (recalc-checksum flow tables) */
struct mlx5dr_domain_cache {
	struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
};
644 
/* Top-level SW-steering domain: device handle, ICM pools, send ring
 * and capability info shared by all tables/matchers/rules under it.
 */
struct mlx5dr_domain {
	struct mlx5dr_domain *peer_dmn;
	struct mlx5_core_dev *mdev;
	u32 pdn;
	struct mlx5_uars_page *uar;
	enum mlx5dr_domain_type type;
	refcount_t refcount;
	struct mutex mutex; /* protect domain */
	struct mlx5dr_icm_pool *ste_icm_pool;
	struct mlx5dr_icm_pool *action_icm_pool;
	struct mlx5dr_send_ring *send_ring;
	struct mlx5dr_domain_info info;
	struct mlx5dr_domain_cache cache;
};
659 
/* Per-direction table state: the start anchor hash table */
struct mlx5dr_table_rx_tx {
	struct mlx5dr_ste_htbl *s_anchor;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u64 default_icm_addr;
};
665 
/* SW-steering flow table holding an ordered list of matchers */
struct mlx5dr_table {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_table_rx_tx rx;
	struct mlx5dr_table_rx_tx tx;
	u32 level;
	u32 table_type;
	u32 table_id;
	struct list_head matcher_list;
	struct mlx5dr_action *miss_action;
	refcount_t refcount;
};
677 
/* Per-direction matcher state: the start htbl, the end anchor, and the
 * STE builder pipelines (separate builder arrays for IPv4 and IPv6,
 * selected by mlx5dr_matcher_select_builders()).
 */
struct mlx5dr_matcher_rx_tx {
	struct mlx5dr_ste_htbl *s_htbl;
	struct mlx5dr_ste_htbl *e_anchor;
	/* currently selected builder array (ste_builder4 or ste_builder6) */
	struct mlx5dr_ste_build *ste_builder;
	struct mlx5dr_ste_build ste_builder4[DR_RULE_MAX_STES];
	struct mlx5dr_ste_build ste_builder6[DR_RULE_MAX_STES];
	u8 num_of_builders;
	u8 num_of_builders4;
	u8 num_of_builders6;
	u64 default_icm_addr;
	struct mlx5dr_table_rx_tx *nic_tbl;
};
690 
/* Matcher: a mask + criteria within a table; rules are added against it */
struct mlx5dr_matcher {
	struct mlx5dr_table *tbl;
	struct mlx5dr_matcher_rx_tx rx;
	struct mlx5dr_matcher_rx_tx tx;
	struct list_head matcher_list;	/* linked on tbl->matcher_list */
	u16 prio;
	struct mlx5dr_match_param mask;
	u8 match_criteria;	/* enum mlx5dr_matcher_criteria bitmask */
	refcount_t refcount;
	struct mlx5dv_flow_matcher *dv_matcher;
};
702 
/* Links one STE into a rule's member list (and vice versa) */
struct mlx5dr_rule_member {
	struct mlx5dr_ste *ste;
	/* attached to mlx5dr_rule via this */
	struct list_head list;
	/* attached to mlx5dr_ste via this */
	struct list_head use_ste_list;
};
710 
/* A steering action; the union arm in use is selected by action_type */
struct mlx5dr_action {
	enum mlx5dr_action_type action_type;
	refcount_t refcount;
	union {
		/* DR_ACTION_TYP_MODIFY_HDR */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_icm_chunk *chunk;
			u8 *data;
			u32 data_size;
			u16 num_of_actions;
			u32 index;
			u8 allow_rx:1;
			u8 allow_tx:1;
			u8 modify_ttl:1;
		} rewrite;
		/* DR_ACTION_TYP_L2_TO_TNL_* (packet reformat) */
		struct {
			struct mlx5dr_domain *dmn;
			u32 reformat_id;
			u32 reformat_size;
		} reformat;
		/* DR_ACTION_TYP_FT (forward to table, SW- or FW-managed) */
		struct {
			u8 is_fw_tbl:1;
			union {
				struct mlx5dr_table *tbl;
				struct {
					struct mlx5_flow_table *ft;
					u64 rx_icm_addr;
					u64 tx_icm_addr;
					struct mlx5_core_dev *mdev;
				} fw_tbl;
			};
		} dest_tbl;
		/* DR_ACTION_TYP_CTR */
		struct {
			u32 ctr_id;
			/* NOTE(review): misspelling of "offset"; renaming
			 * would break existing users of this field
			 */
			u32 offeset;
		} ctr;
		/* DR_ACTION_TYP_VPORT */
		struct {
			struct mlx5dr_domain *dmn;
			struct mlx5dr_cmd_vport_cap *caps;
		} vport;
		/* DR_ACTION_TYP_PUSH_VLAN */
		struct {
			u32 vlan_hdr; /* tpid_pcp_dei_vid */
		} push_vlan;
		/* DR_ACTION_TYP_TAG */
		u32 flow_tag;
	};
};
757 
/* How a hash table is connected to what follows it: a next table on
 * hit, or a miss address (see struct mlx5dr_htbl_connect_info).
 */
enum mlx5dr_connect_type {
	CONNECT_HIT	= 1,
	CONNECT_MISS	= 2,
};
762 
/* Connection target for a hash table; union arm selected by @type */
struct mlx5dr_htbl_connect_info {
	enum mlx5dr_connect_type type;
	union {
		struct mlx5dr_ste_htbl *hit_next_htbl;	/* CONNECT_HIT */
		u64 miss_icm_addr;			/* CONNECT_MISS */
	};
};
770 
/* Per-direction rule state: its STE members list */
struct mlx5dr_rule_rx_tx {
	struct list_head rule_members_list;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
};
775 
/* A rule: concrete match values + actions installed under a matcher */
struct mlx5dr_rule {
	struct mlx5dr_matcher *matcher;
	struct mlx5dr_rule_rx_tx rx;
	struct mlx5dr_rule_rx_tx tx;
	struct list_head rule_actions_list;
};
782 
783 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
784 				    struct mlx5dr_ste *ste);
785 
/* A chunk of device ICM memory allocated from an ICM pool */
struct mlx5dr_icm_chunk {
	struct mlx5dr_icm_bucket *bucket;
	struct list_head chunk_list;
	u32 rkey;
	u32 num_of_entries;
	u32 byte_size;
	u64 icm_addr;	/* device-side address */
	u64 mr_addr;	/* MR-mapped address for posting writes */

	/* Memory optimisation */
	struct mlx5dr_ste *ste_arr;
	u8 *hw_ste_arr;
	struct list_head *miss_list;
};
800 
801 static inline int
802 mlx5dr_matcher_supp_flex_parser_icmp_v4(struct mlx5dr_cmd_caps *caps)
803 {
804 	return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
805 }
806 
807 static inline int
808 mlx5dr_matcher_supp_flex_parser_icmp_v6(struct mlx5dr_cmd_caps *caps)
809 {
810 	return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
811 }
812 
813 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
814 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
815 				   bool ipv6);
816 
817 static inline u32
818 mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
819 {
820 	return 1 << chunk_size;
821 }
822 
823 static inline int
824 mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
825 				   enum mlx5dr_icm_type icm_type)
826 {
827 	int num_of_entries;
828 	int entry_size;
829 
830 	if (icm_type == DR_ICM_TYPE_STE)
831 		entry_size = DR_STE_SIZE;
832 	else
833 		entry_size = DR_MODIFY_ACTION_SIZE;
834 
835 	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
836 
837 	return entry_size * num_of_entries;
838 }
839 
840 static inline struct mlx5dr_cmd_vport_cap *
841 mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
842 {
843 	if (!caps->vports_caps ||
844 	    (vport >= caps->num_vports && vport != WIRE_PORT))
845 		return NULL;
846 
847 	if (vport == WIRE_PORT)
848 		vport = caps->num_vports;
849 
850 	return &caps->vports_caps[vport];
851 }
852 
/* Output of mlx5dr_cmd_query_flow_table() */
struct mlx5dr_cmd_query_flow_table_details {
	u8 status;
	u8 level;
	u64 sw_owner_icm_root_1;
	u64 sw_owner_icm_root_0;
};
859 
860 /* internal API functions */
861 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
862 			    struct mlx5dr_cmd_caps *caps);
863 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
864 				       bool other_vport, u16 vport_number,
865 				       u64 *icm_address_rx,
866 				       u64 *icm_address_tx);
867 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
868 			  bool other_vport, u16 vport_number, u16 *gvmi);
869 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
870 			      struct mlx5dr_esw_caps *caps);
871 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
872 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
873 					u32 table_type,
874 					u32 table_id,
875 					u32 group_id,
876 					u32 modify_header_id,
877 					u32 vport_id);
878 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
879 				    u32 table_type,
880 				    u32 table_id);
881 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
882 				   u32 table_type,
883 				   u8 num_of_actions,
884 				   u64 *actions,
885 				   u32 *modify_header_id);
886 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
887 				     u32 modify_header_id);
888 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
889 				       u32 table_type,
890 				       u32 table_id,
891 				       u32 *group_id);
892 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
893 				  u32 table_type,
894 				  u32 table_id,
895 				  u32 group_id);
896 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
897 				 u32 table_type,
898 				 u64 icm_addr_rx,
899 				 u64 icm_addr_tx,
900 				 u8 level,
901 				 bool sw_owner,
902 				 bool term_tbl,
903 				 u64 *fdb_rx_icm_addr,
904 				 u32 *table_id);
905 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
906 				  u32 table_id,
907 				  u32 table_type);
908 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
909 				enum fs_flow_table_type type,
910 				u32 table_id,
911 				struct mlx5dr_cmd_query_flow_table_details *output);
912 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
913 				   enum mlx5_reformat_ctx_type rt,
914 				   size_t reformat_size,
915 				   void *reformat_data,
916 				   u32 *reformat_id);
917 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
918 				     u32 reformat_id);
919 
/* GID table entry attributes returned by mlx5dr_cmd_query_gid() */
struct mlx5dr_cmd_gid_attr {
	u8 gid[16];
	u8 mac[6];
	u32 roce_ver;
};
925 
/* Attributes for creating the send-engine QP via FW command */
struct mlx5dr_cmd_qp_create_attr {
	u32 page_id;
	u32 pdn;
	u32 cqn;
	u32 pm_state;
	u32 service_type;
	u32 buff_umem_id;
	u32 db_umem_id;
	u32 sq_wqe_cnt;
	u32 rq_wqe_cnt;
	u32 rq_wqe_shift;
};
938 
/* Query GID table entry 'index' of vhca_port_num into *attr. */
int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
			 u16 index, struct mlx5dr_cmd_gid_attr *attr);

/* Create/destroy an ICM memory pool of the given type
 * (STE or modify-action memory; see enum mlx5dr_icm_type). */
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);

/* Allocate/free an ICM chunk of chunk_size entries from a pool. */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
/* Check whether the raw HW STE pointed to by p_hw_ste is not a valid entry. */
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
/* Initialize an STE hash table and write it to HW; update_hw_ste also
 * refreshes the cached HW STE copies. */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste);
/* Format an STE that links to htbl per connect_info, into formatted_ste. */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info);
/* Copy the fields selected by match_criteria from the raw mask buffer
 * into the structured match parameters. */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask);

/* CRC32 helpers used for STE hashing: one-time table init and a
 * slice-by-8 CRC over input_data[0..length). */
void mlx5dr_crc32_init_table(void);
u32 mlx5dr_crc32_slice8_calc(const void *input_data, size_t length);
967 
/* SW-steering QP used by the send engine to post STE/ICM writes. */
struct mlx5dr_qp {
	struct mlx5_core_dev *mdev;
	struct mlx5_wq_qp wq;
	struct mlx5_uars_page *uar;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_qp mqp;
	/* Send-queue bookkeeping.  pc/cc look like producer/consumer
	 * counters (conventional naming) — NOTE(review): confirm in the
	 * send-path implementation, which is outside this header. */
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int *wqe_head;	/* per-slot WQE head indices */
		unsigned int wqe_cnt;
	} sq;
	/* Receive-queue bookkeeping, same counter convention as sq. */
	struct {
		unsigned int pc;
		unsigned int cc;
		unsigned int size;
		unsigned int wqe_cnt;
	} rq;
	int max_inline_data;	/* max bytes that fit inline in a send WQE */
};
989 
/* Completion queue serving the SW-steering QP below (qp back-pointer). */
struct mlx5dr_cq {
	struct mlx5_core_dev *mdev;
	struct mlx5_cqwq wq;
	struct mlx5_wq_ctrl wq_ctrl;
	struct mlx5_core_cq mcq;
	struct mlx5dr_qp *qp;	/* QP whose completions land on this CQ */
};
997 
/* DMA-mapped memory region registered with the device. */
struct mlx5dr_mr {
	struct mlx5_core_dev *mdev;
	struct mlx5_core_mkey mkey;	/* memory key for the region */
	dma_addr_t dma_addr;		/* DMA address of the mapping */
	void *addr;			/* CPU virtual address */
	size_t size;			/* region size in bytes */
};
1005 
1006 #define MAX_SEND_CQE		64
1007 #define MIN_READ_SYNC		64
1008 
/* Per-domain send ring: the QP/CQ/MR set used to post STE writes to ICM. */
struct mlx5dr_send_ring {
	struct mlx5dr_cq *cq;
	struct mlx5dr_qp *qp;
	struct mlx5dr_mr *mr;
	/* Number of WQEs posted and still awaiting completion */
	u32 pending_wqe;
	/* Request a signaled completion once per this threshold of WQEs */
	u16 signal_th;
	/* Each post_send is limited to max_post_send_size bytes */
	u32 max_post_send_size;
	/* manage the send queue */
	u32 tx_head;
	void *buf;		/* staging buffer for outgoing data */
	u32 buf_size;
	struct ib_wc wc[MAX_SEND_CQE];	/* completion polling scratch array */
	u8 sync_buff[MIN_READ_SYNC];	/* target buffer for sync reads */
	struct mlx5dr_mr *sync_mr;	/* MR covering sync_buff */
};
1027 
/* Allocate/free the domain's send ring (QP, CQ, MRs). */
int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
			   struct mlx5dr_send_ring *send_ring);
/* Drain the send ring, forcing outstanding work to complete. */
int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
/* Write 'size' bytes of STE data at 'offset' within the STE's ICM slot. */
int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
			     struct mlx5dr_ste *ste,
			     u8 *data,
			     u16 size,
			     u16 offset);
/* Write a whole hash table to ICM, using formatted_ste/mask as the
 * per-entry template. */
int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
			      struct mlx5dr_ste_htbl *htbl,
			      u8 *formatted_ste, u8 *mask);
/* Write a pre-formatted hash table to ICM; update_hw_ste also refreshes
 * the cached HW STE copies. */
int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
					struct mlx5dr_ste_htbl *htbl,
					u8 *ste_init_data,
					bool update_hw_ste);
/* Write an action's data (e.g. modify-header actions) to ICM. */
int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
				struct mlx5dr_action *action);
1046 
/* FW flow table used to recalculate checksums, cached per vport
 * (see mlx5dr_domain_cache_get_recalc_cs_ft_addr()). */
struct mlx5dr_fw_recalc_cs_ft {
	u64 rx_icm_addr;	/* RX ICM address of the table */
	u32 table_id;		/* FW flow table id */
	u32 group_id;		/* flow group inside the table */
	u32 modify_hdr_id;	/* modify-header context used by the table */
};
1053 
/* Create/destroy a recalc-checksum flow table for the given vport. */
struct mlx5dr_fw_recalc_cs_ft *
mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
/* Look up (creating on demand, presumably — confirm in implementation) the
 * cached recalc-CS table for vport_num; RX ICM address out in *rx_icm_addr. */
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr);
#endif  /* _DR_TYPES_ */
1062