1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste_v1.h"
7 
/* Select the inner (_I) or outer (_O) variant of a V1 lookup type,
 * depending on whether the match is on the inner header.
 */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11 
/* Hardware STE entry formats (written to the entry_format field) */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE	= 0x0,
	DR_STE_V1_TYPE_BWC_DW	= 0x1,
	DR_STE_V1_TYPE_MATCH	= 0x2,
	DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
};
18 
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ].
 * The high byte is the entry/definer format, the low byte is the
 * definer context index (see dr_ste_v1_set_lu_type()).
 */
enum {
	DR_STE_V1_LU_TYPE_NOP				= 0x0000,
	DR_STE_V1_LU_TYPE_ETHL2_TNL			= 0x0002,
	DR_STE_V1_LU_TYPE_IBL3_EXT			= 0x0102,
	DR_STE_V1_LU_TYPE_ETHL2_O			= 0x0003,
	DR_STE_V1_LU_TYPE_IBL4				= 0x0103,
	DR_STE_V1_LU_TYPE_ETHL2_I			= 0x0004,
	DR_STE_V1_LU_TYPE_SRC_QP_GVMI			= 0x0104,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_O			= 0x0005,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O		= 0x0105,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_I			= 0x0006,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I		= 0x0106,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x0007,
	DR_STE_V1_LU_TYPE_IPV6_DES_O			= 0x0107,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x0008,
	DR_STE_V1_LU_TYPE_IPV6_DES_I			= 0x0108,
	DR_STE_V1_LU_TYPE_ETHL4_O			= 0x0009,
	DR_STE_V1_LU_TYPE_IPV6_SRC_O			= 0x0109,
	DR_STE_V1_LU_TYPE_ETHL4_I			= 0x000a,
	DR_STE_V1_LU_TYPE_IPV6_SRC_I			= 0x010a,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O		= 0x000b,
	DR_STE_V1_LU_TYPE_MPLS_O			= 0x010b,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I		= 0x000c,
	DR_STE_V1_LU_TYPE_MPLS_I			= 0x010c,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x000d,
	DR_STE_V1_LU_TYPE_GRE				= 0x010d,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x000e,
	DR_STE_V1_LU_TYPE_GENERAL_PURPOSE		= 0x010e,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x000f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0		= 0x010f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1		= 0x0110,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_OK		= 0x0011,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_0			= 0x0111,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_1			= 0x0112,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_O			= 0x0113,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_I			= 0x0114,
	DR_STE_V1_LU_TYPE_INVALID			= 0x00ff,
	DR_STE_V1_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
59 
/* Hardware anchor points used by insert/remove header actions to locate
 * positions within the packet (outer/inner header boundaries).
 */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER		= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN			= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4			= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC			= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4		= 0x19,
};
67 
/* Sizes (in bytes) that single/double/triple actions occupy in the STE
 * action area; used to track remaining room in action_sz.
 */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
73 
/* Attribute values for the INSERT_POINTER action */
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
};
79 
/* Hardware action identifiers written into the action_id field of each
 * single/double action in the STE action area.
 */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_ASO				= 0x12,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
100 
/* Hardware field identifiers for modify-header (SET/COPY/ADD) actions;
 * referenced by dr_ste_v1_action_modify_field_arr below.
 */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1		= 0x91,
};
133 
/* ASO context types; only flow-meter (policers) is used here */
enum dr_ste_v1_aso_ctx_type {
	DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
};
137 
/* Translation table from SW modify-header fields (MLX5_ACTION_IN_FIELD_*)
 * to HW modify fields: which HW field id to use, the bit range within it
 * (start/end), and - where relevant - the L3/L4 type the field applies to.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
265 
/* Program the STE entry format (one of enum dr_ste_v1_entry_format) */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
270 
271 bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
272 {
273 	u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
274 
275 	/* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
276 	 * are part of the action, so they both set as part of STE init
277 	 */
278 	return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
279 }
280 
/* Program the 40-bit ICM miss address; the address is 64-byte aligned,
 * so it is stored as an index (addr >> 6) split across two fields.
 */
void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
288 
289 u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
290 {
291 	u64 index =
292 		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
293 		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
294 
295 	return index << 6;
296 }
297 
/* Program the STE byte mask (which tag bytes participate in the match) */
void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
302 
/* Read back the STE byte mask */
u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
307 
/* Program this STE's lookup type: high byte is the entry format (definer
 * mode), low byte is the match definer context index.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
313 
/* Program the lookup type of the *next* STE in the chain (same 2-byte
 * encoding as dr_ste_v1_set_lu_type(), but in the next-entry fields).
 */
void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
319 
320 u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
321 {
322 	u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
323 	u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
324 
325 	return (mode << 8 | index);
326 }
327 
/* Program the GVMI (vport identifier) used on STE hit */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
332 
/* Program the hit address: the 32-byte-aligned ICM address combined with
 * the hash table size, split across two next-table-base fields.
 */
void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
340 
/* Initialize a fresh STE: set its lookup type, default the next lookup
 * type to don't-care, and stamp the owner GVMI into the entry and into
 * the hit/miss address high bits.
 * Note: is_rx is accepted for interface parity but unused on v1.
 */
void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
350 
351 void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
352 {
353 	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
354 	u8 *mask = tag + DR_STE_SIZE_TAG;
355 	u8 tmp_tag[DR_STE_SIZE_TAG] = {};
356 
357 	if (ste_size == DR_STE_SIZE_CTRL)
358 		return;
359 
360 	WARN_ON(ste_size != DR_STE_SIZE);
361 
362 	/* Backup tag */
363 	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
364 
365 	/* Swap mask and tag  both are the same size */
366 	memcpy(tag, mask, DR_STE_SIZE_MASK);
367 	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
368 }
369 
/* Build a FLOW_TAG single action carrying the given flow tag value */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
376 
/* Attach a flow counter to the STE (counter_id lives in the STE control,
 * not in the action area, so it consumes no action space).
 */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
381 
/* Request packet reparse after actions that change the packet layout */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
386 
/* Build an L2 encap action: INSERT_POINTER referencing the reformat
 * header by id, marked with the ENCAP attribute. size is in bytes.
 */
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
				u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	dr_ste_v1_set_reparse(hw_ste_p);
}
399 
/* Build an insert-header action: INSERT_POINTER placing the reformat
 * header at (anchor + offset). size and offset are given in bytes.
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
419 
/* Build a remove-header action: REMOVE_BY_SIZE removing 'size' bytes
 * starting at (anchor + offset). size and offset are given in bytes.
 */
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
434 
/* Build a push-VLAN action: INSERT_INLINE writing the 4-byte VLAN header
 * right after the L2 MACs.
 */
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
				    u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
448 
/* Build a pop-VLAN action: REMOVE_BY_SIZE anchored at the first VLAN,
 * removing 'vlans_num' VLAN headers.
 */
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
461 
/* Build an L3 encap as two actions: first remove the packet's L2 headers
 * (up to the IPv4/IPv6 anchor), then insert the reformat header by id.
 * size is in bytes.
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
485 
/* Build an RX L2 decap action: remove everything up to the inner MAC,
 * with decap and VNI-to-CQE reporting enabled.
 */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
497 
/* Build a MODIFY_LIST action referencing 'num_of_actions' modify-header
 * entries stored at 're_write_index' in the modify-actions memory.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *s_action,
					  u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 re_write_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
512 
/* Build an ASO flow-meter action. 'offset' selects a meter within the
 * object range: it is split into an object increment and a line id.
 * The meter result is written to reg_c[dest_reg_id].
 */
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
					 u32 object_id,
					 u32 offset,
					 u8 dest_reg_id,
					 u8 init_color)
{
	MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_ASO);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
		 object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
	/* Convert reg_c index to HW 64bit index */
	MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
		 (dest_reg_id - 1) / 2);
	MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
		 DR_STE_V1_ASO_CTX_TYPE_POLICERS);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
		 offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
	MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
		 init_color);
}
533 
/* Program a MATCH_RANGES STE to range-match on packet length using the
 * given definer; min/max are placed to survive the mask/tag swap done
 * on post-send (see comment below).
 */
static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
					      u32 min, u32 max)
{
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);

	/* When the STE will be sent, its mask and tags will be swapped in
	 * dr_ste_v1_prepare_for_postsend(). This, however, is match range STE
	 * which doesn't have mask, and shouldn't have mask/tag swapped.
	 * We're using the common utilities functions to send this STE, so need
	 * to allow for this swapping - place the values in the corresponding
	 * locations to allow flipping them when writing to ICM.
	 *
	 * min/max_value_2 corresponds to match_dw_0 in its definer.
	 * To allow mask/tag swapping, writing the min/max_2 to min/max_0.
	 *
	 * Pkt len is 2 bytes that are stored in the higher section of the DW.
	 */
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
	MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
}
554 
/* Allocate the next STE in the array when the current one runs out of
 * action room: advance *last_ste, count it in *added_stes, initialize it
 * as a don't-care MATCH STE, and clear its action area.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
569 
/* Same as dr_ste_v1_arr_init_next_match(), but the new STE is a
 * MATCH_RANGES entry.
 */
static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
						u32 *added_stes,
						u16 gvmi)
{
	dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
}
577 
/* Build the TX action chain into the STE array.
 *
 * Actions are emitted in a fixed order (pop vlan, counter, modify header,
 * push vlan, encap/insert/remove header, ASO flow meter, match range).
 * Each action consumes action_sz bytes of the current STE's action area;
 * when there is not enough room - or when two actions may not share an
 * STE (tracked by allow_modify_hdr / allow_encap) - a new STE is chained
 * via dr_ste_v1_arr_init_next_match() and *added_stes is incremented.
 * Finally the hit GVMI and hit address are stamped on the last STE.
 */
void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
			      u8 *action_type_set,
			      u32 actions_caps,
			      u8 *last_ste,
			      struct mlx5dr_ste_actions_attr *attr,
			      u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;

		/* Check if vlan_pop and modify_hdr on same STE is supported */
		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
			allow_modify_hdr = false;
	}

	/* Counter lives in STE control - no action space consumed */
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* Modify header must not share an STE with a later encap */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	/* Reformat actions are mutually exclusive - emit at most one kind */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* Encap L3 needs a single + a double action back to back */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_aso_flow_meter(action,
					     attr->aso_flow_meter.obj_id,
					     attr->aso_flow_meter.offset,
					     attr->aso_flow_meter.dest_reg_id,
					     attr->aso_flow_meter.init_color);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_RANGE]) {
		/* match ranges requires a new STE of its own type */
		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);

		/* we do not support setting any action on the match ranges STE */
		action_sz = 0;

		dr_ste_v1_set_match_range_pkt_len(last_ste,
						  attr->range.definer_id,
						  attr->range.min,
						  attr->range.max);
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
730 
731 void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
732 			      u8 *action_type_set,
733 			      u32 actions_caps,
734 			      u8 *last_ste,
735 			      struct mlx5dr_ste_actions_attr *attr,
736 			      u32 *added_stes)
737 {
738 	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
739 	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
740 	bool allow_modify_hdr = true;
741 	bool allow_ctr = true;
742 
743 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
744 		dr_ste_v1_set_rewrite_actions(last_ste, action,
745 					      attr->decap_actions,
746 					      attr->decap_index);
747 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
748 		action += DR_STE_ACTION_DOUBLE_SZ;
749 		allow_modify_hdr = false;
750 		allow_ctr = false;
751 	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
752 		dr_ste_v1_set_rx_decap(last_ste, action);
753 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
754 		action += DR_STE_ACTION_SINGLE_SZ;
755 		allow_modify_hdr = false;
756 		allow_ctr = false;
757 	}
758 
759 	if (action_type_set[DR_ACTION_TYP_TAG]) {
760 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
761 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
762 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
763 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
764 			allow_modify_hdr = true;
765 			allow_ctr = true;
766 		}
767 		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
768 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
769 		action += DR_STE_ACTION_SINGLE_SZ;
770 	}
771 
772 	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
773 		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
774 		    !allow_modify_hdr) {
775 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
776 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
777 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
778 		}
779 
780 		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
781 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
782 		action += DR_STE_ACTION_SINGLE_SZ;
783 		allow_ctr = false;
784 
785 		/* Check if vlan_pop and modify_hdr on same STE is supported */
786 		if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
787 			allow_modify_hdr = false;
788 	}
789 
790 	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
791 		/* Modify header and decapsulation must use different STEs */
792 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
793 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
794 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
795 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
796 			allow_modify_hdr = true;
797 			allow_ctr = true;
798 		}
799 		dr_ste_v1_set_rewrite_actions(last_ste, action,
800 					      attr->modify_actions,
801 					      attr->modify_index);
802 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
803 		action += DR_STE_ACTION_DOUBLE_SZ;
804 	}
805 
806 	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
807 		int i;
808 
809 		for (i = 0; i < attr->vlans.count; i++) {
810 			if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
811 			    !allow_modify_hdr) {
812 				dr_ste_v1_arr_init_next_match(&last_ste,
813 							      added_stes,
814 							      attr->gvmi);
815 				action = MLX5_ADDR_OF(ste_mask_and_match_v1,
816 						      last_ste, action);
817 				action_sz = DR_STE_ACTION_TRIPLE_SZ;
818 			}
819 			dr_ste_v1_set_push_vlan(last_ste, action,
820 						attr->vlans.headers[i]);
821 			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
822 			action += DR_STE_ACTION_DOUBLE_SZ;
823 		}
824 	}
825 
826 	if (action_type_set[DR_ACTION_TYP_CTR]) {
827 		/* Counter action set after decap and before insert_hdr
828 		 * to exclude decaped / encaped header respectively.
829 		 */
830 		if (!allow_ctr) {
831 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
832 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
833 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
834 			allow_modify_hdr = true;
835 		}
836 		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
837 		allow_ctr = false;
838 	}
839 
840 	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
841 		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
842 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
843 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
844 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
845 		}
846 		dr_ste_v1_set_encap(last_ste, action,
847 				    attr->reformat.id,
848 				    attr->reformat.size);
849 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
850 		action += DR_STE_ACTION_DOUBLE_SZ;
851 		allow_modify_hdr = false;
852 	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
853 		u8 *d_action;
854 
855 		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
856 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
857 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
858 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
859 		}
860 
861 		d_action = action + DR_STE_ACTION_SINGLE_SZ;
862 
863 		dr_ste_v1_set_encap_l3(last_ste,
864 				       action, d_action,
865 				       attr->reformat.id,
866 				       attr->reformat.size);
867 		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
868 		allow_modify_hdr = false;
869 	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
870 		/* Modify header, decap, and encap must use different STEs */
871 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
872 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
873 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
874 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
875 		}
876 		dr_ste_v1_set_insert_hdr(last_ste, action,
877 					 attr->reformat.id,
878 					 attr->reformat.param_0,
879 					 attr->reformat.param_1,
880 					 attr->reformat.size);
881 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
882 		action += DR_STE_ACTION_DOUBLE_SZ;
883 		allow_modify_hdr = false;
884 	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
885 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
886 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
887 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
888 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
889 			allow_modify_hdr = true;
890 			allow_ctr = true;
891 		}
892 		dr_ste_v1_set_remove_hdr(last_ste, action,
893 					 attr->reformat.param_0,
894 					 attr->reformat.param_1,
895 					 attr->reformat.size);
896 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
897 		action += DR_STE_ACTION_SINGLE_SZ;
898 	}
899 
900 	if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
901 		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
902 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
903 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
904 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
905 		}
906 		dr_ste_v1_set_aso_flow_meter(action,
907 					     attr->aso_flow_meter.obj_id,
908 					     attr->aso_flow_meter.offset,
909 					     attr->aso_flow_meter.dest_reg_id,
910 					     attr->aso_flow_meter.init_color);
911 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
912 		action += DR_STE_ACTION_DOUBLE_SZ;
913 	}
914 
915 	if (action_type_set[DR_ACTION_TYP_RANGE]) {
916 		/* match ranges requires a new STE of its own type */
917 		dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
918 		dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
919 
920 		/* we do not support setting any action on the match ranges STE */
921 		action_sz = 0;
922 
923 		dr_ste_v1_set_match_range_pkt_len(last_ste,
924 						  attr->range.definer_id,
925 						  attr->range.min,
926 						  attr->range.max);
927 	}
928 
929 	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
930 	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
931 }
932 
/* Encode a V1 "set" double action into @d_action: write @length bits of
 * @data into modify-header register @hw_field at bit offset @shifter.
 */
void dr_ste_v1_set_action_set(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* V1 register offsets are QW based - bias the caller's bit shifter */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
946 
/* Encode a V1 "add" double action into @d_action: add @data to the @length
 * bit field of modify-header register @hw_field at bit offset @shifter.
 */
void dr_ste_v1_set_action_add(u8 *d_action,
			      u8 hw_field,
			      u8 shifter,
			      u8 length,
			      u32 data)
{
	/* V1 register offsets are QW based - bias the caller's bit shifter */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
960 
/* Encode a V1 "copy" double action into @d_action: copy @dst_len bits from
 * register @src_hw_field (bit offset @src_shifter) to register @dst_hw_field
 * (bit offset @dst_shifter).
 */
void dr_ste_v1_set_action_copy(u8 *d_action,
			       u8 dst_hw_field,
			       u8 dst_shifter,
			       u8 dst_len,
			       u8 src_hw_field,
			       u8 src_shifter)
{
	/* V1 register offsets are QW based - bias both bit shifters */
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
977 
978 #define DR_STE_DECAP_L3_ACTION_NUM	8
979 #define DR_STE_L2_HDR_MAX_SZ		20
980 
981 int dr_ste_v1_set_action_decap_l3_list(void *data,
982 				       u32 data_sz,
983 				       u8 *hw_action,
984 				       u32 hw_action_sz,
985 				       u16 *used_hw_action_num)
986 {
987 	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
988 	void *data_ptr = padded_data;
989 	u16 used_actions = 0;
990 	u32 inline_data_sz;
991 	u32 i;
992 
993 	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
994 		return -EINVAL;
995 
996 	inline_data_sz =
997 		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
998 
999 	/* Add an alignment padding  */
1000 	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
1001 
1002 	/* Remove L2L3 outer headers */
1003 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
1004 		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
1005 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
1006 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
1007 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
1008 		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
1009 	hw_action += DR_STE_ACTION_DOUBLE_SZ;
1010 	used_actions++; /* Remove and NOP are a single double action */
1011 
1012 	/* Point to the last dword of the header */
1013 	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
1014 
1015 	/* Add the new header using inline action 4Byte at a time, the header
1016 	 * is added in reversed order to the beginning of the packet to avoid
1017 	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
1018 	 * two bytes are padded and later removed.
1019 	 */
1020 	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
1021 		void *addr_inline;
1022 
1023 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
1024 			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
1025 		/* The hardware expects here offset to words (2 bytes) */
1026 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
1027 
1028 		/* Copy bytes one by one to avoid endianness problem */
1029 		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
1030 					   hw_action, inline_data);
1031 		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
1032 		hw_action += DR_STE_ACTION_DOUBLE_SZ;
1033 		used_actions++;
1034 	}
1035 
1036 	/* Remove first 2 extra bytes */
1037 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
1038 		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
1039 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
1040 	/* The hardware expects here size in words (2 bytes) */
1041 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
1042 	used_actions++;
1043 
1044 	*used_hw_action_num = used_actions;
1045 
1046 	return 0;
1047 }
1048 
/* Build the STE bit mask for the ETHL2_SRC_DST lookup from the inner or
 * outer match spec. DR_STE_SET_TAG copies each mask field into the bit mask
 * and clears it in the spec, so leftover non-zero fields mark criteria this
 * STE cannot match.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* ip_version is matched via the encoded l3_type field - mask it fully */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	/* Either VLAN-tag criterion masks the single first_vlan_qualifier field */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
1073 
/* Build the ETHL2_SRC_DST match tag from the inner or outer spec.
 * Consumed spec fields are cleared; returns -EINVAL for an ip_version
 * value the STE cannot encode.
 */
static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);

	/* Translate the spec's IP version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Unsupported IP version value */
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
1109 
1110 void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
1111 					 struct mlx5dr_match_param *mask)
1112 {
1113 	dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1114 
1115 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
1116 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1117 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
1118 }
1119 
/* Build the IPv6 destination-address match tag (all 128 bits, copied as
 * four 32-bit chunks); consumed spec fields are cleared.
 */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1133 
1134 void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
1135 					  struct mlx5dr_match_param *mask)
1136 {
1137 	dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
1138 
1139 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
1140 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1141 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
1142 }
1143 
/* Build the IPv6 source-address match tag (all 128 bits, copied as four
 * 32-bit chunks); consumed spec fields are cleared.
 */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1157 
1158 void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
1159 					  struct mlx5dr_match_param *mask)
1160 {
1161 	dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
1162 
1163 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
1164 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1165 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
1166 }
1167 
/* Build the IPv4 5-tuple match tag (addresses, L4 ports, protocol and IP
 * header bits). TCP and UDP ports share the same tag fields; since
 * DR_STE_SET_TAG clears each spec field after copying, only whichever of
 * tcp_*/udp_* was actually set ends up in the tag.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1192 
1193 void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
1194 					      struct mlx5dr_match_param *mask)
1195 {
1196 	dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
1197 
1198 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
1199 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1200 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
1201 }
1202 
/* Shared bit-mask builder for the ETHL2 SRC and DST lookups: common L2
 * header fields plus the second (inner-most visible) VLAN fields taken
 * from the misc parameters. Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version is matched via the encoded l3_type field - mask it fully */
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* Second-VLAN criteria live in misc and are direction specific */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1252 
/* Shared tag builder for the ETHL2 SRC and DST lookups: common L2 header
 * fields plus second-VLAN fields from misc. Consumed spec/misc fields are
 * cleared; returns -EINVAL for an unsupported ip_version.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	/* Translate the spec's IP version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Unsupported IP version value */
		return -EINVAL;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second-VLAN criteria live in misc and are direction specific */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1310 
/* Build the ETHL2_SRC bit mask: source MAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);

	/* Remaining L2 fields are common to the SRC and DST lookups */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1321 
/* Build the ETHL2_SRC match tag: source MAC plus the shared L2 fields. */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	/* Remaining L2 fields are common to the SRC and DST lookups */
	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1333 
1334 void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1335 				     struct mlx5dr_match_param *mask)
1336 {
1337 	dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1338 
1339 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
1340 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1341 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
1342 }
1343 
/* Build the ETHL2 (dst) bit mask: destination MAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* Remaining L2 fields are common to the SRC and DST lookups */
	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1354 
/* Build the ETHL2 (dst) match tag: destination MAC plus the shared L2 fields. */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	/* Remaining L2 fields are common to the SRC and DST lookups */
	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1366 
1367 void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1368 				     struct mlx5dr_match_param *mask)
1369 {
1370 	dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1371 
1372 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
1373 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1374 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
1375 }
1376 
/* Build the ETHL2_TNL bit mask: L2 header fields plus the L2 tunneling
 * network id taken from the misc VXLAN VNI. Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version is matched via the encoded l3_type field - mask it fully */
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the tunneling id field */
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1404 
/* Build the ETHL2_TNL match tag: L2 header fields plus the VXLAN VNI as
 * the L2 tunneling network id. Consumed spec/misc fields are cleared;
 * returns -EINVAL for an unsupported ip_version.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the tunneling id field */
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Translate the spec's IP version into the HW l3_type encoding */
	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Unsupported IP version value */
		return -EINVAL;
	}

	return 0;
}
1446 
1447 void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1448 				     struct mlx5dr_match_param *mask)
1449 {
1450 	dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1451 
1452 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
1453 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1454 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
1455 }
1456 
/* Build the IPv4 misc match tag: TTL and IHL fields of the IPv4 header. */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);

	return 0;
}
1468 
1469 void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1470 					   struct mlx5dr_match_param *mask)
1471 {
1472 	dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1473 
1474 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
1475 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1476 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
1477 }
1478 
/* Build the ETHL4 match tag: L4 ports (TCP/UDP share fields; only the set
 * one survives since DR_STE_SET_TAG clears the spec after copying), IP
 * protocol bits and the IPv6 flow label from misc.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* The flow label is direction specific and stored in misc */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1508 
1509 void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1510 					 struct mlx5dr_match_param *mask)
1511 {
1512 	dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1513 
1514 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1515 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1516 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1517 }
1518 
/* Build the MPLS match tag from the direction-specific misc2 MPLS fields. */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1532 
1533 void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1534 			       struct mlx5dr_match_param *mask)
1535 {
1536 	dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1537 
1538 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1539 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1540 	sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1541 }
1542 
/* Build the GRE tunnel match tag: protocol, key halves and the C/K/S
 * present bits from misc. Consumed misc fields are cleared.
 */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct  mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1559 
1560 void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1561 				  struct mlx5dr_match_param *mask)
1562 {
1563 	dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1564 
1565 	sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1566 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1567 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1568 }
1569 
/* Build the tunnel MPLS match tag: picks the MPLS-over-GRE fields when any
 * of them is set in the mask, otherwise the MPLS-over-UDP fields. Consumed
 * misc2 fields are cleared.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1604 
/* Initialize a tunneled-MPLS STE builder (first label over GRE/UDP). */
void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask)
{
	/* Consumes the MPLS mask fields into sb->bit_mask */
	dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
}
1614 
1615 static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
1616 						 struct mlx5dr_ste_build *sb,
1617 						 u8 *tag)
1618 {
1619 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1620 	u8 *parser_ptr;
1621 	u8 parser_id;
1622 	u32 mpls_hdr;
1623 
1624 	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
1625 	misc2->outer_first_mpls_over_udp_label = 0;
1626 	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
1627 	misc2->outer_first_mpls_over_udp_exp = 0;
1628 	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
1629 	misc2->outer_first_mpls_over_udp_s_bos = 0;
1630 	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
1631 	misc2->outer_first_mpls_over_udp_ttl = 0;
1632 
1633 	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
1634 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1635 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1636 
1637 	return 0;
1638 }
1639 
/* Initialize an MPLS-over-UDP STE builder; the lookup type depends on
 * which flex parser FW assigned for MPLS-over-UDP.
 */
void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers {0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
}
1655 
1656 static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
1657 						 struct mlx5dr_ste_build *sb,
1658 						 u8 *tag)
1659 {
1660 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1661 	u8 *parser_ptr;
1662 	u8 parser_id;
1663 	u32 mpls_hdr;
1664 
1665 	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
1666 	misc2->outer_first_mpls_over_gre_label = 0;
1667 	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
1668 	misc2->outer_first_mpls_over_gre_exp = 0;
1669 	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
1670 	misc2->outer_first_mpls_over_gre_s_bos = 0;
1671 	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
1672 	misc2->outer_first_mpls_over_gre_ttl = 0;
1673 
1674 	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
1675 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1676 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1677 
1678 	return 0;
1679 }
1680 
/* Initialize an MPLS-over-GRE STE builder; the lookup type depends on
 * which flex parser FW assigned for MPLS-over-GRE.
 */
void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers {0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
}
1696 
1697 static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
1698 				    struct mlx5dr_ste_build *sb,
1699 				    u8 *tag)
1700 {
1701 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1702 	bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
1703 	u32 *icmp_header_data;
1704 	u8 *icmp_type;
1705 	u8 *icmp_code;
1706 
1707 	if (is_ipv4) {
1708 		icmp_header_data	= &misc3->icmpv4_header_data;
1709 		icmp_type		= &misc3->icmpv4_type;
1710 		icmp_code		= &misc3->icmpv4_code;
1711 	} else {
1712 		icmp_header_data	= &misc3->icmpv6_header_data;
1713 		icmp_type		= &misc3->icmpv6_type;
1714 		icmp_code		= &misc3->icmpv6_code;
1715 	}
1716 
1717 	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
1718 	MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
1719 	MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
1720 
1721 	*icmp_header_data = 0;
1722 	*icmp_type = 0;
1723 	*icmp_code = 0;
1724 
1725 	return 0;
1726 }
1727 
/* Initialize an ICMP(v4/v6) STE builder. */
void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask)
{
	/* Consumes the ICMP mask fields into sb->bit_mask */
	dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
}
1737 
1738 static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
1739 					       struct mlx5dr_ste_build *sb,
1740 					       u8 *tag)
1741 {
1742 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1743 
1744 	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1745 		       misc2, metadata_reg_a);
1746 
1747 	return 0;
1748 }
1749 
/* Initialize a general-purpose (metadata_reg_a) STE builder. */
void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask)
{
	/* Consumes the metadata_reg_a mask into sb->bit_mask */
	dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
}
1759 
1760 static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
1761 					   struct mlx5dr_ste_build *sb,
1762 					   u8 *tag)
1763 {
1764 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1765 
1766 	if (sb->inner) {
1767 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
1768 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
1769 	} else {
1770 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
1771 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
1772 	}
1773 
1774 	return 0;
1775 }
1776 
/* Initialize an L4-misc (TCP seq/ack) STE builder.
 *
 * NOTE(review): the tag builder honors sb->inner, but the lookup type
 * is always the outer variant (ETHL4_MISC_O) — confirm against the
 * STEv1 definer tables whether an inner variant should be selected.
 */
void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
}
1786 
1787 static int
1788 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1789 					      struct mlx5dr_ste_build *sb,
1790 					      u8 *tag)
1791 {
1792 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1793 
1794 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1795 		       outer_vxlan_gpe_flags, misc3,
1796 		       outer_vxlan_gpe_flags);
1797 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1798 		       outer_vxlan_gpe_next_protocol, misc3,
1799 		       outer_vxlan_gpe_next_protocol);
1800 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1801 		       outer_vxlan_gpe_vni, misc3,
1802 		       outer_vxlan_gpe_vni);
1803 
1804 	return 0;
1805 }
1806 
/* Initialize a VXLAN-GPE tunnel STE builder. */
void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
						    struct mlx5dr_match_param *mask)
{
	/* Consumes the VXLAN-GPE mask fields into sb->bit_mask */
	dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
}
1816 
1817 static int
1818 dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1819 					   struct mlx5dr_ste_build *sb,
1820 					   u8 *tag)
1821 {
1822 	struct mlx5dr_match_misc *misc = &value->misc;
1823 
1824 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1825 		       geneve_protocol_type, misc, geneve_protocol_type);
1826 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1827 		       geneve_oam, misc, geneve_oam);
1828 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1829 		       geneve_opt_len, misc, geneve_opt_len);
1830 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1831 		       geneve_vni, misc, geneve_vni);
1832 
1833 	return 0;
1834 }
1835 
/* Initialize a Geneve tunnel STE builder. */
void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
						 struct mlx5dr_match_param *mask)
{
	/* Consumes the Geneve mask fields into sb->bit_mask */
	dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
}
1845 
1846 static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
1847 					      struct mlx5dr_ste_build *sb,
1848 					      uint8_t *tag)
1849 {
1850 	struct mlx5dr_match_misc5 *misc5 = &value->misc5;
1851 
1852 	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
1853 	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
1854 
1855 	return 0;
1856 }
1857 
/* Initialize a tunnel_header_0/1 STE builder. */
void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	/* Consumes the tunnel_header mask fields into sb->bit_mask */
	dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
}
1866 
1867 static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
1868 					  struct mlx5dr_ste_build *sb,
1869 					  u8 *tag)
1870 {
1871 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1872 
1873 	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
1874 	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
1875 	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
1876 	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
1877 
1878 	return 0;
1879 }
1880 
/* Initialize a steering-registers-0 (metadata C0-C3) STE builder. */
void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	/* Consumes the reg_c_0..3 mask fields into sb->bit_mask */
	dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
}
1890 
1891 static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
1892 					  struct mlx5dr_ste_build *sb,
1893 					  u8 *tag)
1894 {
1895 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1896 
1897 	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
1898 	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
1899 	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
1900 	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
1901 
1902 	return 0;
1903 }
1904 
/* Initialize a steering-registers-1 (metadata C4-C7) STE builder. */
void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	/* Consumes the reg_c_4..7 mask fields into sb->bit_mask */
	dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
}
1914 
/* Build the bit mask for source GVMI/QP matching.
 * source_gvmi is masked all-ones if source_port is masked at all, since
 * the port is translated to a GVMI at tag-build time; the eswitch owner
 * vhca_id mask is consumed here (it only selects which domain's vport
 * table is used, see dr_ste_v1_build_src_gvmi_qpn_tag()).
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1924 
/* Build the source GVMI/QP tag.
 *
 * The matcher's source_port is translated into a GVMI via the vport
 * capabilities of the owning domain. When eswitch_owner_vhca_id is
 * matched, it selects whether the local domain or its peer provides
 * the vport table; a vhca_id belonging to neither is an error.
 *
 * Return: 0 on success, -EINVAL on unknown vhca_id or invalid vport.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		/* Consume the vhca_id mask field */
		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Nothing more to do unless source_gvmi is part of the mask */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	/* Consume the source_port mask field */
	misc->source_port = 0;
	return 0;
}
1968 
/* Initialize a source GVMI/QP STE builder. Unlike most builders, the
 * bit mask is produced by a dedicated function rather than by running
 * the tag builder on the mask, because source_port translates to a
 * GVMI only at tag-build time.
 */
void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask)
{
	dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
}
1978 
1979 static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
1980 				      u32 *misc4_field_value,
1981 				      bool *parser_is_used,
1982 				      u8 *tag)
1983 {
1984 	u32 id = *misc4_field_id;
1985 	u8 *parser_ptr;
1986 
1987 	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
1988 		return;
1989 
1990 	parser_is_used[id] = true;
1991 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
1992 
1993 	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
1994 	*misc4_field_id = 0;
1995 	*misc4_field_value = 0;
1996 }
1997 
/* Build the flex parser tag from the four misc4 programmable sample
 * field id/value pairs, deduplicating parser slots via parser_is_used.
 *
 * NOTE(review): "felx" is a typo for "flex"; kept as-is since the name
 * is referenced by both flex_parser_{0,1}_init below — rename all three
 * together in a dedicated cleanup.
 */
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
				  &misc_4_mask->prog_sample_field_value_0,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
				  &misc_4_mask->prog_sample_field_value_1,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
				  &misc_4_mask->prog_sample_field_value_2,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
				  &misc_4_mask->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}
2023 
/* Initialize a flex parser STE builder covering parsers 0-3. */
void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
	/* Consumes the misc4 id/value mask pairs into sb->bit_mask */
	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
2032 
/* Initialize a flex parser STE builder covering parsers 4-7. */
void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
	/* Consumes the misc4 id/value mask pairs into sb->bit_mask */
	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
}
2041 
/* Build the tag matching the data dword of Geneve TLV option 0,
 * placed in the flex parser slot FW assigned for that option.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	/* NOTE(review): flex_parser_3 of ste_flex_parser_0 is used as the
	 * layout for the single dword at parser_ptr — presumably it is the
	 * field at offset 0 of that layout; confirm against the ifc file.
	 */
	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	/* Consume the mask/value field */
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
2057 
2058 void
2059 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
2060 						    struct mlx5dr_match_param *mask)
2061 {
2062 	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
2063 
2064 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
2065 	 * flex parsers_{0-3}/{4-7} respectively.
2066 	 */
2067 	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
2068 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
2069 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
2070 
2071 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
2072 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
2073 }
2074 
2075 static int
2076 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
2077 							 struct mlx5dr_ste_build *sb,
2078 							 uint8_t *tag)
2079 {
2080 	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
2081 	struct mlx5dr_match_misc *misc = &value->misc;
2082 
2083 	if (misc->geneve_tlv_option_0_exist) {
2084 		MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
2085 		misc->geneve_tlv_option_0_exist = 0;
2086 	}
2087 
2088 	return 0;
2089 }
2090 
/* Initialize a "Geneve TLV option 0 exists" STE builder. */
void
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
							  struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
	/* Consumes the option-exist mask bit into sb->bit_mask */
	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
}
2100 
2101 static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
2102 						    struct mlx5dr_ste_build *sb,
2103 						    u8 *tag)
2104 {
2105 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2106 
2107 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
2108 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
2109 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
2110 
2111 	return 0;
2112 }
2113 
/* Initialize a GTP-U tunnel STE builder. */
void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask)
{
	/* Consumes the GTP-U mask fields into sb->bit_mask */
	dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
}
2123 
/* Build the tag for GTP-U fields whose FW-assigned flex parser falls in
 * the FLEX_PARSER_0 range (parsers 0-3); fields assigned to parsers 4-7
 * are handled by the _flex_parser_1_ variant below.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2139 
/* Initialize a GTP-U builder over flex parsers 0-3. */
void
dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	/* Consumes the applicable GTP-U mask fields into sb->bit_mask */
	dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
}
2150 
/* Build the tag for GTP-U fields whose FW-assigned flex parser falls in
 * the FLEX_PARSER_1 range (parsers 4-7); the mirror image of the
 * _flex_parser_0_ variant above.
 */
static int
dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}
2166 
/* Initialize a GTP-U builder over flex parsers 4-7. */
void
dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	/* Consumes the applicable GTP-U mask fields into sb->bit_mask */
	dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
}
2177 
/* STEv1 context: ops table binding the generic SW steering layer to the
 * STEv1 builders, STE field accessors and action formatters above.
 */
static struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v1_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.is_miss_addr_set		= &dr_ste_v1_is_miss_addr_set,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_TX_POP |
					  DR_STE_CTX_ACTION_CAP_RX_PUSH |
					  DR_STE_CTX_ACTION_CAP_RX_ENCAP |
					  DR_STE_CTX_ACTION_CAP_POP_MDFY,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
2237 
/* Return the (static, read-only after init) STEv1 ops context. */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
{
	return &ste_ctx_v1;
}
2242