1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste_v1.h"
7 
/* Paste together the inner or outer variant of a lookup-type name:
 * expands to DR_STE_V1_LU_TYPE_<lookup_type>_I when @inner is non-zero,
 * otherwise to the _O (outer headers) variant.
 */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11 
/* Values written to the STE entry_format field (see dr_ste_v1_set_entry_type()):
 * byte-granularity BWC, dword-granularity BWC, or a mask-and-match entry.
 */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE	= 0x0,
	DR_STE_V1_TYPE_BWC_DW	= 0x1,
	DR_STE_V1_TYPE_MATCH	= 0x2,
};
17 
18 /* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ].
 * The high byte is written to (next_)entry_format and the low byte to the
 * definer context index (see dr_ste_v1_set_lu_type()). _O/_I suffixes
 * select outer/inner header variants of the same definer.
 */
enum {
	DR_STE_V1_LU_TYPE_NOP				= 0x0000,
	DR_STE_V1_LU_TYPE_ETHL2_TNL			= 0x0002,
	DR_STE_V1_LU_TYPE_IBL3_EXT			= 0x0102,
	DR_STE_V1_LU_TYPE_ETHL2_O			= 0x0003,
	DR_STE_V1_LU_TYPE_IBL4				= 0x0103,
	DR_STE_V1_LU_TYPE_ETHL2_I			= 0x0004,
	DR_STE_V1_LU_TYPE_SRC_QP_GVMI			= 0x0104,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_O			= 0x0005,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O		= 0x0105,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_I			= 0x0006,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I		= 0x0106,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x0007,
	DR_STE_V1_LU_TYPE_IPV6_DES_O			= 0x0107,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x0008,
	DR_STE_V1_LU_TYPE_IPV6_DES_I			= 0x0108,
	DR_STE_V1_LU_TYPE_ETHL4_O			= 0x0009,
	DR_STE_V1_LU_TYPE_IPV6_SRC_O			= 0x0109,
	DR_STE_V1_LU_TYPE_ETHL4_I			= 0x000a,
	DR_STE_V1_LU_TYPE_IPV6_SRC_I			= 0x010a,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O		= 0x000b,
	DR_STE_V1_LU_TYPE_MPLS_O			= 0x010b,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I		= 0x000c,
	DR_STE_V1_LU_TYPE_MPLS_I			= 0x010c,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x000d,
	DR_STE_V1_LU_TYPE_GRE				= 0x010d,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x000e,
	DR_STE_V1_LU_TYPE_GENERAL_PURPOSE		= 0x010e,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x000f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0		= 0x010f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1		= 0x0110,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_OK		= 0x0011,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_0			= 0x0111,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_1			= 0x0112,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_O			= 0x0113,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_I			= 0x0114,
	DR_STE_V1_LU_TYPE_INVALID			= 0x00ff,
	DR_STE_V1_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
58 
/* Hardware anchor points within the packet, used as start/end references
 * for insert/remove header actions.
 */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER		= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN			= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4			= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC			= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4		= 0x19,
};
66 
/* Size in bytes that single/double/triple actions occupy in the STE's
 * action area; used to track the remaining action budget (action_sz).
 */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
72 
/* Attribute qualifier for the insert-with-pointer action. */
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
};
78 
/* Hardware action-id opcodes written to the action_id field of each
 * single/double action in the STE action area.
 */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
98 
/* Hardware destination-dword identifiers for modify-header (set/add/copy)
 * actions; referenced by dr_ste_v1_action_modify_field_arr below.
 */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1		= 0x91,
};
131 
/* Translation table from SW-steering modify-header field ids
 * (MLX5_ACTION_IN_FIELD_*) to the STE v1 hardware field: which hardware
 * dword to touch (.hw_field), the bit range within it (.start/.end), and,
 * where relevant, the L3/L4 header type the field belongs to so the caller
 * can validate the action against the matched packet type.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
259 
/* Set the STE's entry_format field (one of enum dr_ste_v1_entry_format). */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
264 
265 void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
266 {
267 	u64 index = miss_addr >> 6;
268 
269 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
270 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
271 }
272 
273 u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
274 {
275 	u64 index =
276 		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
277 		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
278 
279 	return index << 6;
280 }
281 
/* Set the per-byte mask used by BWC (byte-wise compare) STEs. */
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
286 
/* Read back the per-byte mask set by dr_ste_v1_set_byte_mask(). */
u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
291 
/* Program this STE's lookup type: high byte is the definer mode
 * (entry_format), low byte the definer context index.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
297 
/* Program the lookup type of the *next* STE in the chain, split the same
 * way as dr_ste_v1_set_lu_type(): mode byte and definer index byte.
 */
void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
303 
304 u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
305 {
306 	u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
307 	u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
308 
309 	return (mode << 8 | index);
310 }
311 
/* Set the GVMI (vport owner id) carried in the upper bits of the hit address. */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
316 
/* Program the hit (next table) address. The ICM address is kept in 32-byte
 * units with the table-size bits OR'ed into the low bits, then split across
 * the two next_table_base fields.
 */
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
324 
/* Initialize a fresh STE: set its own lookup type, mark the next lookup as
 * don't-care, and stamp the owner GVMI into the entry, the hit address and
 * the miss address. @is_rx is unused by the v1 format.
 */
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
334 
/* Prepare an STE for posting to hardware: v1 hardware expects the mask
 * before the tag, so swap the tag and mask regions in place. A CTRL-only
 * write has neither, so nothing to do; any other size must be a full STE.
 */
void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
{
	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
	u8 *mask = tag + DR_STE_SIZE_TAG;
	u8 tmp_tag[DR_STE_SIZE_TAG] = {};

	if (ste_size == DR_STE_SIZE_CTRL)
		return;

	WARN_ON(ste_size != DR_STE_SIZE);

	/* Backup tag */
	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);

	/* Swap mask and tag  both are the same size */
	memcpy(tag, mask, DR_STE_SIZE_MASK);
	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
}
353 
/* Build a single FLOW_TAG action that attaches @flow_tag to matching RX packets. */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
360 
/* Attach flow counter @ctr_id to this STE (dedicated field, not an action slot). */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
365 
/* Request hardware re-parse of the packet after this STE's actions modify it. */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
370 
/* Build a double insert-with-pointer action that encapsulates the packet
 * with the reformat header identified by @reformat_id (@size in bytes).
 */
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
				u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	dr_ste_v1_set_reparse(hw_ste_p);
}
383 
/* Build a double insert-with-pointer action that inserts the header
 * @reformat_id at @offset bytes past the @anchor point (plain insert,
 * not encap - attributes = NONE).
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
403 
/* Build a single remove-by-size action that strips @size bytes starting
 * @offset bytes past the @anchor point.
 */
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
418 
/* Build a double insert-inline action that pushes one VLAN header
 * (@vlan_hdr holds TPID+TCI) right after the L2 MAC addresses.
 */
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
				    u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
432 
/* Build a single remove-by-size action that pops @vlans_num VLAN headers,
 * anchored at the first VLAN.
 */
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
445 
/* Build the two-action L3 encap sequence: a single action that removes the
 * original L2 headers (up to the IP anchor), followed by a double
 * insert-with-pointer action that encapsulates with @reformat_id.
 * Together they occupy a triple action slot.
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
469 
/* Build a single RX decap action: strip everything up to the inner MAC,
 * with tunnel decap semantics and VNI reported to the CQE.
 */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
481 
/* Build a single modify-list action pointing at a pre-written list of
 * @num_of_actions modify-header entries located at ICM index @re_write_index.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *s_action,
					  u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 re_write_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
496 
/* Start a new match STE in the array when the current one has run out of
 * action space: advance *last_ste to the next STE, count it in *added_stes,
 * initialize it as a don't-care MATCH entry for @gvmi, and clear its
 * action area.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
511 
/* Encode the requested TX actions into the STE array starting at @last_ste.
 * Actions are packed in hardware-mandated order into the current STE's
 * action area; when the remaining budget (action_sz) is too small, or two
 * actions may not share an STE (allow_* flags), a fresh MATCH STE is
 * appended via dr_ste_v1_arr_init_next_match() and *added_stes is bumped.
 * Finally the hit GVMI/address are stamped on the last STE used.
 */
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* Encap must not share an STE with modify header */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	/* The reformat actions below are mutually exclusive */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* Encap L3 is a single + double pair - needs a triple slot */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
635 
636 void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
637 			      u8 *action_type_set,
638 			      u32 actions_caps,
639 			      u8 *last_ste,
640 			      struct mlx5dr_ste_actions_attr *attr,
641 			      u32 *added_stes)
642 {
643 	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
644 	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
645 	bool allow_modify_hdr = true;
646 	bool allow_ctr = true;
647 
648 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
649 		dr_ste_v1_set_rewrite_actions(last_ste, action,
650 					      attr->decap_actions,
651 					      attr->decap_index);
652 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
653 		action += DR_STE_ACTION_DOUBLE_SZ;
654 		allow_modify_hdr = false;
655 		allow_ctr = false;
656 	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
657 		dr_ste_v1_set_rx_decap(last_ste, action);
658 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
659 		action += DR_STE_ACTION_SINGLE_SZ;
660 		allow_modify_hdr = false;
661 		allow_ctr = false;
662 	}
663 
664 	if (action_type_set[DR_ACTION_TYP_TAG]) {
665 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
666 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
667 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
668 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
669 			allow_modify_hdr = true;
670 			allow_ctr = true;
671 		}
672 		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
673 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
674 		action += DR_STE_ACTION_SINGLE_SZ;
675 	}
676 
677 	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
678 		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
679 		    !allow_modify_hdr) {
680 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
681 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
682 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
683 		}
684 
685 		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
686 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
687 		action += DR_STE_ACTION_SINGLE_SZ;
688 		allow_ctr = false;
689 
690 		/* Check if vlan_pop and modify_hdr on same STE is supported */
691 		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
692 			allow_modify_hdr = false;
693 	}
694 
695 	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
696 		/* Modify header and decapsulation must use different STEs */
697 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
698 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
699 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
700 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
701 			allow_modify_hdr = true;
702 			allow_ctr = true;
703 		}
704 		dr_ste_v1_set_rewrite_actions(last_ste, action,
705 					      attr->modify_actions,
706 					      attr->modify_index);
707 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
708 		action += DR_STE_ACTION_DOUBLE_SZ;
709 	}
710 
711 	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
712 		int i;
713 
714 		for (i = 0; i < attr->vlans.count; i++) {
715 			if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
716 			    !allow_modify_hdr) {
717 				dr_ste_v1_arr_init_next_match(&last_ste,
718 							      added_stes,
719 							      attr->gvmi);
720 				action = MLX5_ADDR_OF(ste_mask_and_match_v1,
721 						      last_ste, action);
722 				action_sz = DR_STE_ACTION_TRIPLE_SZ;
723 			}
724 			dr_ste_v1_set_push_vlan(last_ste, action,
725 						attr->vlans.headers[i]);
726 			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
727 			action += DR_STE_ACTION_DOUBLE_SZ;
728 		}
729 	}
730 
731 	if (action_type_set[DR_ACTION_TYP_CTR]) {
732 		/* Counter action set after decap and before insert_hdr
733 		 * to exclude decaped / encaped header respectively.
734 		 */
735 		if (!allow_ctr) {
736 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
737 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
738 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
739 			allow_modify_hdr = true;
740 		}
741 		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
742 		allow_ctr = false;
743 	}
744 
745 	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
746 		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
747 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
748 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
749 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
750 		}
751 		dr_ste_v1_set_encap(last_ste, action,
752 				    attr->reformat.id,
753 				    attr->reformat.size);
754 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
755 		action += DR_STE_ACTION_DOUBLE_SZ;
756 		allow_modify_hdr = false;
757 	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
758 		u8 *d_action;
759 
760 		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
761 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
762 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
763 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
764 		}
765 
766 		d_action = action + DR_STE_ACTION_SINGLE_SZ;
767 
768 		dr_ste_v1_set_encap_l3(last_ste,
769 				       action, d_action,
770 				       attr->reformat.id,
771 				       attr->reformat.size);
772 		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
773 		allow_modify_hdr = false;
774 	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
775 		/* Modify header, decap, and encap must use different STEs */
776 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
777 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
778 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
779 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
780 		}
781 		dr_ste_v1_set_insert_hdr(last_ste, action,
782 					 attr->reformat.id,
783 					 attr->reformat.param_0,
784 					 attr->reformat.param_1,
785 					 attr->reformat.size);
786 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
787 		action += DR_STE_ACTION_DOUBLE_SZ;
788 		allow_modify_hdr = false;
789 	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
790 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
791 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
792 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
793 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
794 			allow_modify_hdr = true;
795 			allow_ctr = true;
796 		}
797 		dr_ste_v1_set_remove_hdr(last_ste, action,
798 					 attr->reformat.param_0,
799 					 attr->reformat.param_1,
800 					 attr->reformat.size);
801 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
802 		action += DR_STE_ACTION_SINGLE_SZ;
803 	}
804 
805 	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
806 	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
807 }
808 
/* Program a modify-header "set" double action: write the immediate
 * @data into @length bits of the destination dword @hw_field, starting
 * at bit @shifter.
 */
void dr_ste_v1_set_action_set(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* v1 modify-header shifters are taken relative to a QW base offset */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
822 
/* Program a modify-header "add" double action: add the immediate
 * @data to @length bits of the destination dword @hw_field, starting
 * at bit @shifter.
 */
void dr_ste_v1_set_action_add(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* v1 modify-header shifters are taken relative to a QW base offset */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
836 
/* Program a modify-header "copy" double action: copy @dst_len bits
 * from source dword @src_hw_field (bit @src_shifter) into destination
 * dword @dst_hw_field (bit @dst_shifter).
 */
void dr_ste_v1_set_action_copy(u8 *d_action,
			       u8 dst_hw_field,
			       u8 dst_shifter,
			       u8 dst_len,
			       u8 src_hw_field,
			       u8 src_shifter)
{
	/* Both shifters are taken relative to the v1 QW base offset */
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
853 
/* Number of HW action slots required by the decap-L3 sequence below */
#define DR_STE_DECAP_L3_ACTION_NUM	8
/* Upper bound on the rebuilt L2 header size in bytes */
#define DR_STE_L2_HDR_MAX_SZ		20

/* Build the HW action list that replaces the packet's L2/L3 outer headers
 * with the new L2 header in @data (@data_sz bytes, 14 or 18 expected per
 * the comment below): remove the old headers, insert the new header
 * inline 4B at a time, then trim the 2 alignment bytes.
 *
 * @data:               new L2 header to insert
 * @data_sz:            size of @data in bytes
 * @hw_action:          output buffer for the HW actions
 * @hw_action_sz:       size of @hw_action in bytes
 * @used_hw_action_num: out - number of HW actions written
 *
 * Returns 0 on success, -EINVAL if @hw_action is too small.
 */
int dr_ste_v1_set_action_decap_l3_list(void *data,
				       u32 data_sz,
				       u8 *hw_action,
				       u32 hw_action_sz,
				       u16 *used_hw_action_num)
{
	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
	void *data_ptr = padded_data;
	u16 used_actions = 0;
	u32 inline_data_sz;
	u32 i;

	/* Make sure the output buffer can hold the whole action sequence */
	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding  */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy bytes one by one to avoid endianness problem */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		used_actions++;
	}

	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

	*used_hw_action_num = used_actions;

	return 0;
}
924 
/* Build the lookup bit mask for the L2 src+dst STE from the match mask.
 * Mask fields that are consumed here (the vlan qualifiers) are cleared,
 * presumably so later validation sees them as handled — same pattern as
 * the tag builders below.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* ip_version mask is folded into a fully-set l3_type mask */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
949 
/* Build the STE tag (match value) for the L2 src+dst lookup.
 * Returns -EINVAL for an ip_version other than 4 or 6.
 * Consumed spec fields (ip_version, vlan qualifiers) are cleared.
 */
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);

	/* Translate the spec's ip_version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);

	/* cvlan takes precedence over svlan when both are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
985 
/* Initialize an STE builder for L2 src+dst matching: derive the bit
 * mask from @mask, pick the inner/outer lookup type, and register the
 * tag-building callback.
 */
void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
}
995 
/* Build the STE tag for IPv6 destination address matching (four 32-bit
 * chunks covering all 128 bits).
 */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1009 
/* Initialize an STE builder for IPv6 destination address matching. */
void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
}
1019 
/* Build the STE tag for IPv6 source address matching (four 32-bit
 * chunks covering all 128 bits).
 */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1033 
/* Initialize an STE builder for IPv6 source address matching. */
void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
}
1043 
/* Build the STE tag for the IPv4 5-tuple lookup: addresses, L4 ports
 * (TCP and UDP share the same tag fields), protocol and IP header bits.
 * Consumed tcp_flags are cleared from the spec.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	/* TCP and UDP ports map onto the same HW tag fields */
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1068 
/* Initialize an STE builder for IPv4 5-tuple matching. */
void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
					      struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
}
1078 
/* Build the bit mask shared by the L2 src and L2 dst STEs: first VLAN,
 * fragmentation, ethertype/ip_version, and the second (QinQ) VLAN taken
 * from the misc parameters. Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version mask is folded into a fully-set l3_type mask */
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* Second VLAN fields live in misc and are inner/outer specific */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1128 
/* Build the tag shared by the L2 src and L2 dst STEs (mirror of the
 * bit-mask builder above). Returns -EINVAL for an ip_version other
 * than 4 or 6. Consumed spec/misc fields are cleared.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	/* Translate the spec's ip_version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	/* cvlan takes precedence over svlan when both are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second VLAN fields live in misc and are inner/outer specific */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1186 
/* Build the L2 source STE bit mask: SMAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1197 
/* Build the L2 source STE tag: SMAC plus the shared L2 fields. */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1209 
/* Initialize an STE builder for L2 source matching. */
void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
}
1219 
/* Build the L2 destination STE bit mask: DMAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1230 
/* Build the L2 destination STE tag: DMAC plus the shared L2 fields. */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1242 
/* Initialize an STE builder for L2 destination matching. */
void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
}
1252 
/* Build the bit mask for the L2 tunneling STE: DMAC, first VLAN,
 * ethertype/ip_version, and the VXLAN VNI from the misc parameters.
 * Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version mask is folded into a fully-set l3_type mask */
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* VNI occupies the upper 24 bits of the tunneling network id */
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1280 
/* Build the tag for the L2 tunneling STE (mirror of the bit-mask
 * builder above). Returns -EINVAL for an ip_version other than 4 or 6.
 * Consumed spec/misc fields are cleared.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* VNI occupies the upper 24 bits of the tunneling network id */
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* cvlan takes precedence over svlan when both are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Translate the spec's ip_version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	return 0;
}
1322 
/* Initialize an STE builder for L2 tunneling matching. */
void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
}
1332 
/* Build the STE tag for miscellaneous IPv4 header fields (TTL, IHL). */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);

	return 0;
}
1344 
/* Initialize an STE builder for miscellaneous IPv4 header matching. */
void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
					   struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
}
1354 
/* Build the STE tag for the L4 lookup (used for IPv6 L3/L4 matching):
 * L4 ports (TCP and UDP share tag fields), protocol, fragmentation,
 * DSCP/ECN, hop limit, flow label and TCP flags.
 * Consumed tcp_flags are cleared from the spec.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	/* TCP and UDP ports map onto the same HW tag fields */
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* Flow label lives in misc and is inner/outer specific */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1384 
/* Initialize an STE builder for IPv6 L3/L4 matching. */
void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
}
1394 
/* Build the STE tag for MPLS matching, using the inner or outer
 * first-MPLS fields from misc2 depending on the builder's direction.
 */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1408 
/* Initialize an STE builder for MPLS matching. */
void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
}
1418 
/* Build the STE tag for GRE tunnel matching: protocol, key halves and
 * the C/K/S presence bits from the misc parameters.
 */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct  mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1435 
/* Initialize an STE builder for GRE tunnel matching. */
void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
}
1445 
/* Build the STE tag for tunneled MPLS matching, taking the first MPLS
 * label either from the MPLS-over-GRE or the MPLS-over-UDP misc2
 * fields, depending on which group the mask selected.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1480 
/* Initialize an STE builder for tunneled MPLS matching. */
void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
}
1490 
1491 static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
1492 						 struct mlx5dr_ste_build *sb,
1493 						 u8 *tag)
1494 {
1495 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1496 	u8 *parser_ptr;
1497 	u8 parser_id;
1498 	u32 mpls_hdr;
1499 
1500 	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
1501 	misc2->outer_first_mpls_over_udp_label = 0;
1502 	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
1503 	misc2->outer_first_mpls_over_udp_exp = 0;
1504 	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
1505 	misc2->outer_first_mpls_over_udp_s_bos = 0;
1506 	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
1507 	misc2->outer_first_mpls_over_udp_ttl = 0;
1508 
1509 	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
1510 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1511 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1512 
1513 	return 0;
1514 }
1515 
/* Initialize an STE builder for MPLS-over-UDP matching; the lookup
 * type depends on which flex parser bank holds the assigned parser id.
 */
void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} includes
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
}
1531 
1532 static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
1533 						 struct mlx5dr_ste_build *sb,
1534 						 u8 *tag)
1535 {
1536 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1537 	u8 *parser_ptr;
1538 	u8 parser_id;
1539 	u32 mpls_hdr;
1540 
1541 	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
1542 	misc2->outer_first_mpls_over_gre_label = 0;
1543 	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
1544 	misc2->outer_first_mpls_over_gre_exp = 0;
1545 	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
1546 	misc2->outer_first_mpls_over_gre_s_bos = 0;
1547 	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
1548 	misc2->outer_first_mpls_over_gre_ttl = 0;
1549 
1550 	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
1551 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1552 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1553 
1554 	return 0;
1555 }
1556 
/* Initialize an STE builder for MPLS-over-GRE matching; the lookup
 * type depends on which flex parser bank holds the assigned parser id.
 */
void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	/* The tag builder doubles as the bit-mask builder here */
	dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} includes
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
}
1572 
1573 static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
1574 				    struct mlx5dr_ste_build *sb,
1575 				    u8 *tag)
1576 {
1577 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1578 	bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
1579 	u32 *icmp_header_data;
1580 	u8 *icmp_type;
1581 	u8 *icmp_code;
1582 
1583 	if (is_ipv4) {
1584 		icmp_header_data	= &misc3->icmpv4_header_data;
1585 		icmp_type		= &misc3->icmpv4_type;
1586 		icmp_code		= &misc3->icmpv4_code;
1587 	} else {
1588 		icmp_header_data	= &misc3->icmpv6_header_data;
1589 		icmp_type		= &misc3->icmpv6_type;
1590 		icmp_code		= &misc3->icmpv6_code;
1591 	}
1592 
1593 	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
1594 	MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
1595 	MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
1596 
1597 	*icmp_header_data = 0;
1598 	*icmp_type = 0;
1599 	*icmp_code = 0;
1600 
1601 	return 0;
1602 }
1603 
1604 void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1605 			       struct mlx5dr_match_param *mask)
1606 {
1607 	dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1608 
1609 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1610 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1611 	sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1612 }
1613 
1614 static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
1615 					       struct mlx5dr_ste_build *sb,
1616 					       u8 *tag)
1617 {
1618 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1619 
1620 	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1621 		       misc2, metadata_reg_a);
1622 
1623 	return 0;
1624 }
1625 
1626 void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1627 					  struct mlx5dr_match_param *mask)
1628 {
1629 	dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1630 
1631 	sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1632 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1633 	sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1634 }
1635 
1636 static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
1637 					   struct mlx5dr_ste_build *sb,
1638 					   u8 *tag)
1639 {
1640 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1641 
1642 	if (sb->inner) {
1643 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
1644 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
1645 	} else {
1646 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
1647 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
1648 	}
1649 
1650 	return 0;
1651 }
1652 
1653 void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1654 				      struct mlx5dr_match_param *mask)
1655 {
1656 	dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1657 
1658 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1659 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1660 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1661 }
1662 
1663 static int
1664 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1665 					      struct mlx5dr_ste_build *sb,
1666 					      u8 *tag)
1667 {
1668 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1669 
1670 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1671 		       outer_vxlan_gpe_flags, misc3,
1672 		       outer_vxlan_gpe_flags);
1673 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1674 		       outer_vxlan_gpe_next_protocol, misc3,
1675 		       outer_vxlan_gpe_next_protocol);
1676 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1677 		       outer_vxlan_gpe_vni, misc3,
1678 		       outer_vxlan_gpe_vni);
1679 
1680 	return 0;
1681 }
1682 
1683 void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1684 						    struct mlx5dr_match_param *mask)
1685 {
1686 	dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1687 
1688 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1689 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1690 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1691 }
1692 
1693 static int
1694 dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1695 					   struct mlx5dr_ste_build *sb,
1696 					   u8 *tag)
1697 {
1698 	struct mlx5dr_match_misc *misc = &value->misc;
1699 
1700 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1701 		       geneve_protocol_type, misc, geneve_protocol_type);
1702 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1703 		       geneve_oam, misc, geneve_oam);
1704 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1705 		       geneve_opt_len, misc, geneve_opt_len);
1706 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1707 		       geneve_vni, misc, geneve_vni);
1708 
1709 	return 0;
1710 }
1711 
1712 void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1713 						 struct mlx5dr_match_param *mask)
1714 {
1715 	dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1716 
1717 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1718 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1719 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1720 }
1721 
1722 static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
1723 					      struct mlx5dr_ste_build *sb,
1724 					      uint8_t *tag)
1725 {
1726 	struct mlx5dr_match_misc5 *misc5 = &value->misc5;
1727 
1728 	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
1729 	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
1730 
1731 	return 0;
1732 }
1733 
1734 void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
1735 					 struct mlx5dr_match_param *mask)
1736 {
1737 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1738 	dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
1739 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1740 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
1741 }
1742 
1743 static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
1744 					  struct mlx5dr_ste_build *sb,
1745 					  u8 *tag)
1746 {
1747 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1748 
1749 	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
1750 	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
1751 	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
1752 	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
1753 
1754 	return 0;
1755 }
1756 
1757 void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1758 				     struct mlx5dr_match_param *mask)
1759 {
1760 	dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1761 
1762 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1763 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1764 	sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1765 }
1766 
1767 static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
1768 					  struct mlx5dr_ste_build *sb,
1769 					  u8 *tag)
1770 {
1771 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1772 
1773 	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
1774 	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
1775 	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
1776 	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
1777 
1778 	return 0;
1779 }
1780 
1781 void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1782 				     struct mlx5dr_match_param *mask)
1783 {
1784 	dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1785 
1786 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1787 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1788 	sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1789 }
1790 
/* Build the bit mask for source GVMI/QP matching.
 * source_port and source_sqn are matched as all-ones masks (the tag
 * translates the actual values); source_eswitch_owner_vhca_id is
 * consumed here since it only selects which domain resolves the vport,
 * and must not remain set in the match mask.
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1800 
/* Build the source GVMI/QP tag.
 * The source SQN is written directly; the source port is translated to
 * the vport's GVMI, resolved either against this domain or its peer,
 * depending on the eswitch_owner_vhca_id in the match value.
 * Returns 0 on success, -EINVAL if the vhca_id or vport is unknown.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		/* vhca_id selected the domain; consume it from the match value */
		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* If the mask doesn't request GVMI matching, we're done */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	/* Write the vport's GVMI (0 means no GVMI to match on this vport) */
	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	misc->source_port = 0;
	return 0;
}
1844 
1845 void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1846 				       struct mlx5dr_match_param *mask)
1847 {
1848 	dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1849 
1850 	sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
1851 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1852 	sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
1853 }
1854 
1855 static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
1856 				      u32 *misc4_field_value,
1857 				      bool *parser_is_used,
1858 				      u8 *tag)
1859 {
1860 	u32 id = *misc4_field_id;
1861 	u8 *parser_ptr;
1862 
1863 	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
1864 		return;
1865 
1866 	parser_is_used[id] = true;
1867 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
1868 
1869 	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
1870 	*misc4_field_id = 0;
1871 	*misc4_field_value = 0;
1872 }
1873 
1874 static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
1875 					   struct mlx5dr_ste_build *sb,
1876 					   u8 *tag)
1877 {
1878 	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
1879 	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
1880 
1881 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
1882 				  &misc_4_mask->prog_sample_field_value_0,
1883 				  parser_is_used, tag);
1884 
1885 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
1886 				  &misc_4_mask->prog_sample_field_value_1,
1887 				  parser_is_used, tag);
1888 
1889 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
1890 				  &misc_4_mask->prog_sample_field_value_2,
1891 				  parser_is_used, tag);
1892 
1893 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
1894 				  &misc_4_mask->prog_sample_field_value_3,
1895 				  parser_is_used, tag);
1896 
1897 	return 0;
1898 }
1899 
1900 void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1901 					struct mlx5dr_match_param *mask)
1902 {
1903 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1904 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1905 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1906 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1907 }
1908 
1909 void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1910 					struct mlx5dr_match_param *mask)
1911 {
1912 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1913 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1914 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1915 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1916 }
1917 
/* Build the GENEVE TLV option 0 data tag: the option data dword is
 * written into the flex parser slot FW assigned for TLV option 0.
 * NOTE(review): the value is written through the ste_flex_parser_0
 * flex_parser_3 field — presumably because parser_ptr already points at
 * the parser_id-adjusted offset within the tag; confirm against
 * dr_ste_calc_flex_parser_offset().
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
1933 
1934 void
1935 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1936 						    struct mlx5dr_match_param *mask)
1937 {
1938 	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1939 
1940 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1941 	 * flex parsers_{0-3}/{4-7} respectively.
1942 	 */
1943 	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1944 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1945 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1946 
1947 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1948 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
1949 }
1950 
1951 static int
1952 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
1953 							 struct mlx5dr_ste_build *sb,
1954 							 uint8_t *tag)
1955 {
1956 	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1957 	struct mlx5dr_match_misc *misc = &value->misc;
1958 
1959 	if (misc->geneve_tlv_option_0_exist) {
1960 		MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
1961 		misc->geneve_tlv_option_0_exist = 0;
1962 	}
1963 
1964 	return 0;
1965 }
1966 
1967 void
1968 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
1969 							  struct mlx5dr_match_param *mask)
1970 {
1971 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
1972 	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
1973 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1974 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
1975 }
1976 
1977 static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
1978 						    struct mlx5dr_ste_build *sb,
1979 						    u8 *tag)
1980 {
1981 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1982 
1983 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
1984 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
1985 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
1986 
1987 	return 0;
1988 }
1989 
1990 void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
1991 					       struct mlx5dr_match_param *mask)
1992 {
1993 	dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
1994 
1995 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1996 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1997 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
1998 }
1999 
/* Build the GTP-U tag for fields whose FW-assigned flex parser id falls
 * in the FLEX_PARSER_0 range (parsers 0-3). Each field is written only
 * if its parser id belongs to this range; fields on parsers 4-7 are
 * handled by the FLEX_PARSER_1 variant below.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2015 
2016 void
2017 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
2018 					    struct mlx5dr_match_param *mask)
2019 {
2020 	dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
2021 
2022 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2023 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2024 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
2025 }
2026 
/* Build the GTP-U tag for fields whose FW-assigned flex parser id falls
 * in the FLEX_PARSER_1 range (parsers 4-7). Mirror of the
 * FLEX_PARSER_0 variant above.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2042 
2043 void
2044 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
2045 					    struct mlx5dr_match_param *mask)
2046 {
2047 	dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
2048 
2049 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
2050 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2051 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
2052 }
2053 
/* STEv1 context: the full set of builder/getter/setter/action callbacks
 * this file implements, exposed to the generic SW steering layer via
 * mlx5dr_ste_get_ctx_v1().
 */
static struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v1_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_TX_POP |
					  DR_STE_CTX_ACTION_CAP_RX_PUSH |
					  DR_STE_CTX_ACTION_CAP_RX_ENCAP |
					  DR_STE_CTX_ACTION_CAP_POP_MDFY,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
2112 
/* Return the STEv1 callback context (singleton, never NULL). */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
{
	return &ste_ctx_v1;
}
2117