1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste.h"
7 
/* Expand to the inner- or outer-header variant of a lookup-type enum:
 * picks DR_STE_V1_LU_TYPE_<name>_I when 'inner' is set, _O otherwise.
 */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11 
/* STEv1 entry formats; values are hardware-defined.
 * BWC_BYTE/BWC_DW use the ste_match_bwc_v1 layout (carry a byte mask),
 * MATCH uses the ste_mask_and_match_v1 layout.
 */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE	= 0x0,
	DR_STE_V1_TYPE_BWC_DW	= 0x1,
	DR_STE_V1_TYPE_MATCH	= 0x2,
};
17 
/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ].
 * _O/_I suffixes denote outer/inner header variants (see
 * DR_STE_CALC_DFNR_TYPE). Values are hardware-defined and must not change.
 */
enum {
	DR_STE_V1_LU_TYPE_NOP				= 0x0000,
	DR_STE_V1_LU_TYPE_ETHL2_TNL			= 0x0002,
	DR_STE_V1_LU_TYPE_IBL3_EXT			= 0x0102,
	DR_STE_V1_LU_TYPE_ETHL2_O			= 0x0003,
	DR_STE_V1_LU_TYPE_IBL4				= 0x0103,
	DR_STE_V1_LU_TYPE_ETHL2_I			= 0x0004,
	DR_STE_V1_LU_TYPE_SRC_QP_GVMI			= 0x0104,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_O			= 0x0005,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O		= 0x0105,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_I			= 0x0006,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I		= 0x0106,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x0007,
	DR_STE_V1_LU_TYPE_IPV6_DES_O			= 0x0107,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x0008,
	DR_STE_V1_LU_TYPE_IPV6_DES_I			= 0x0108,
	DR_STE_V1_LU_TYPE_ETHL4_O			= 0x0009,
	DR_STE_V1_LU_TYPE_IPV6_SRC_O			= 0x0109,
	DR_STE_V1_LU_TYPE_ETHL4_I			= 0x000a,
	DR_STE_V1_LU_TYPE_IPV6_SRC_I			= 0x010a,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O		= 0x000b,
	DR_STE_V1_LU_TYPE_MPLS_O			= 0x010b,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I		= 0x000c,
	DR_STE_V1_LU_TYPE_MPLS_I			= 0x010c,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x000d,
	DR_STE_V1_LU_TYPE_GRE				= 0x010d,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x000e,
	DR_STE_V1_LU_TYPE_GENERAL_PURPOSE		= 0x010e,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x000f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0		= 0x010f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1		= 0x0110,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_0			= 0x0111,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_1			= 0x0112,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_O			= 0x0113,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_I			= 0x0114,
	DR_STE_V1_LU_TYPE_INVALID			= 0x00ff,
	DR_STE_V1_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
57 
/* Hardware anchor IDs: reference points inside the packet used by
 * insert/remove header actions to express start/end offsets.
 */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER		= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN			= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4			= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC			= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4		= 0x19,
};
65 
/* Sizes, in bytes, that each action class consumes from the STE's
 * action area (see action_sz bookkeeping in set_actions_tx/rx).
 */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
71 
/* Attribute values for the insert-with-pointer action. */
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
};
77 
/* Hardware action opcodes written into the STE action fields. */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
97 
/* Hardware destination-dword identifiers for modify-header (set/add/copy)
 * actions; used as .hw_field in dr_ste_v1_action_modify_field_arr below.
 */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7		= 0x91,
};
130 
/* Translation table from MLX5_ACTION_IN_FIELD_* (the SW modify-header
 * field IDs) to the STEv1 hardware field: destination dword (.hw_field),
 * bit range within it (.start/.end), and — where a field only exists for
 * a specific L3/L4 protocol — the required .l3_type/.l4_type.
 * Note register C pairs are swapped relative to their HW dwords
 * (e.g. REG_C_0 -> REGISTER_6) per the device register layout.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
258 
/* Set the STE's entry format (one of enum dr_ste_v1_entry_format). */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
263 
/* Program the STE miss address. The address is 64B-aligned, so the low
 * 6 bits are dropped and the 34-bit index is split across the
 * miss_address_39_32 (high 8 bits) and miss_address_31_6 (low 26 bits)
 * fields.
 */
static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
}
271 
272 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
273 {
274 	u64 index =
275 		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
276 		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
277 
278 	return index << 6;
279 }
280 
/* Program the per-byte mask of a BWC-format STE. */
static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
285 
/* Read the per-byte mask of a BWC-format STE. */
static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
290 
/* Program this STE's lookup type: high byte = definer mode (entry
 * format), low byte = match definer context index.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
296 
/* Program the lookup type of the *next* table entry: high byte = next
 * entry format, low byte = hash definer context index.
 */
static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
302 
303 static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
304 {
305 	u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
306 	u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
307 
308 	return (mode << 8 | index);
309 }
310 
/* Set the GVMI (vport/function identifier) carried in the upper bits of
 * the next-table base address on STE hit.
 */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
315 
/* Program the hit (next-table) address. The ICM address is 32B-aligned;
 * the table size is OR'ed into the low bits of the combined index, which
 * is then split across the two next_table_base fields.
 */
static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
}
323 
/* Initialize a fresh STE: program its own lookup type, a don't-care
 * next lookup type, and stamp the owner GVMI into the entry itself and
 * into both the hit and miss address high bits.
 * Note: 'is_rx' is accepted for interface symmetry but unused on STEv1.
 */
static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
			   bool is_rx, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
334 
/* Prepare an STE buffer for sending to the device: STEv1 hardware
 * expects the mask before the tag, while SW builds [ctrl][tag][mask],
 * so swap the tag and mask regions in place. A ctrl-only write needs
 * no adjustment; anything else must be a full STE.
 */
static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
					   u32 ste_size)
{
	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
	u8 *mask = tag + DR_STE_SIZE_TAG;
	u8 tmp_tag[DR_STE_SIZE_TAG] = {};

	if (ste_size == DR_STE_SIZE_CTRL)
		return;

	WARN_ON(ste_size != DR_STE_SIZE);

	/* Backup tag */
	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);

	/* Swap mask and tag  both are the same size */
	memcpy(tag, mask, DR_STE_SIZE_MASK);
	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
}
354 
/* Build a single FLOW_TAG action that marks RX packets with flow_tag. */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
361 
/* Attach a flow counter to this STE (dedicated counter_id field, does
 * not consume action-area space).
 */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
366 
/* Request packet reparse after this STE's actions — required whenever
 * an action modifies the packet (insert/remove/rewrite).
 */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
371 
372 static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
373 				u32 reformat_id, int size)
374 {
375 	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
376 		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
377 	/* The hardware expects here size in words (2 byte) */
378 	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
379 	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
380 	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
381 		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
382 	dr_ste_v1_set_reparse(hw_ste_p);
383 }
384 
/* Build a generic insert-header action: insert the reformat header
 * identified by reformat_id at 'offset' bytes past the given anchor.
 * Unlike encap, the NONE attribute marks this as a plain header push.
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
404 
/* Build a remove-header-by-size action: strip 'size' bytes starting at
 * 'offset' bytes past the given anchor.
 */
static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);

	dr_ste_v1_set_reparse(hw_ste_p);
}
419 
/* Build a push-VLAN action: insert the 4-byte vlan_hdr inline right
 * after the L2 MAC addresses.
 */
static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
				    u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
433 
/* Build a pop-VLAN action: remove vlans_num VLAN headers starting at
 * the first-VLAN anchor.
 */
static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
446 
/* Build an L3 encap as two actions occupying a triple slot: first a
 * single action removing the original L2 headers (up to the IP anchor),
 * then a double insert-by-pointer action adding the reformat header.
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
470 
/* Build an RX L2 tunnel decap: strip everything up to the inner MAC,
 * with decap semantics and VNI copied to the CQE.
 */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
482 
/* Build a modify-header action referencing a pre-written list of
 * num_of_actions rewrite entries at ICM offset re_write_index.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *s_action,
					  u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 re_write_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
497 
/* Chain an extra match-all STE when the current one has run out of
 * action space: advance *last_ste to the next STE slot, initialize it
 * as a don't-care MATCH entry owned by gvmi, clear its action area,
 * and bump *added_stes so the caller accounts for the extra entry.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
512 
/* Encode the requested TX actions into the STE's action area, in the
 * fixed order required by the hardware pipeline: pop vlan, counter,
 * modify header, push vlan, then one reformat (encap L2 / encap L3 /
 * insert header / remove header). A fresh BWC STE offers one double
 * action slot (8B); whenever a step does not fit — or must be isolated
 * from a conflicting earlier action (allow_modify_hdr / allow_encap) —
 * a chained match-all STE with a triple slot (12B) is appended via
 * dr_ste_v1_arr_init_next_match and *added_stes is incremented.
 * Finally the hit GVMI and final ICM address are stamped on the last STE.
 */
static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
				     u8 *action_type_set,
				     u8 *last_ste,
				     struct mlx5dr_ste_actions_attr *attr,
				     u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		/* Rewrite must not share an STE with the vlan removal */
		allow_modify_hdr = false;
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* Encap must not share an STE with the header rewrite */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_push_vlan(last_ste, action,
						attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	/* At most one reformat action per rule */
	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* Needs a full triple: single remove-L2 + double insert */
		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_remove_hdr(last_ste, action,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
632 
633 static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
634 				     u8 *action_type_set,
635 				     u8 *last_ste,
636 				     struct mlx5dr_ste_actions_attr *attr,
637 				     u32 *added_stes)
638 {
639 	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
640 	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
641 	bool allow_modify_hdr = true;
642 	bool allow_ctr = true;
643 
644 	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
645 		dr_ste_v1_set_rewrite_actions(last_ste, action,
646 					      attr->decap_actions,
647 					      attr->decap_index);
648 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
649 		action += DR_STE_ACTION_DOUBLE_SZ;
650 		allow_modify_hdr = false;
651 		allow_ctr = false;
652 	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
653 		dr_ste_v1_set_rx_decap(last_ste, action);
654 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
655 		action += DR_STE_ACTION_SINGLE_SZ;
656 		allow_modify_hdr = false;
657 		allow_ctr = false;
658 	}
659 
660 	if (action_type_set[DR_ACTION_TYP_TAG]) {
661 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
662 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
663 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
664 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
665 			allow_modify_hdr = true;
666 			allow_ctr = true;
667 		}
668 		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
669 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
670 		action += DR_STE_ACTION_SINGLE_SZ;
671 	}
672 
673 	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
674 		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
675 		    !allow_modify_hdr) {
676 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
677 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
678 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
679 			allow_modify_hdr = false;
680 			allow_ctr = false;
681 		}
682 
683 		dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
684 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
685 		action += DR_STE_ACTION_SINGLE_SZ;
686 	}
687 
688 	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
689 		/* Modify header and decapsulation must use different STEs */
690 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
691 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
692 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
693 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
694 			allow_modify_hdr = true;
695 			allow_ctr = true;
696 		}
697 		dr_ste_v1_set_rewrite_actions(last_ste, action,
698 					      attr->modify_actions,
699 					      attr->modify_index);
700 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
701 		action += DR_STE_ACTION_DOUBLE_SZ;
702 	}
703 
704 	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
705 		int i;
706 
707 		for (i = 0; i < attr->vlans.count; i++) {
708 			if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
709 			    !allow_modify_hdr) {
710 				dr_ste_v1_arr_init_next_match(&last_ste,
711 							      added_stes,
712 							      attr->gvmi);
713 				action = MLX5_ADDR_OF(ste_mask_and_match_v1,
714 						      last_ste, action);
715 				action_sz = DR_STE_ACTION_TRIPLE_SZ;
716 			}
717 			dr_ste_v1_set_push_vlan(last_ste, action,
718 						attr->vlans.headers[i]);
719 			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
720 			action += DR_STE_ACTION_DOUBLE_SZ;
721 		}
722 	}
723 
724 	if (action_type_set[DR_ACTION_TYP_CTR]) {
725 		/* Counter action set after decap and before insert_hdr
726 		 * to exclude decaped / encaped header respectively.
727 		 */
728 		if (!allow_ctr) {
729 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
730 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
731 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
732 			allow_modify_hdr = true;
733 			allow_ctr = false;
734 		}
735 		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
736 	}
737 
738 	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
739 		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
740 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
741 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
742 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
743 		}
744 		dr_ste_v1_set_encap(last_ste, action,
745 				    attr->reformat.id,
746 				    attr->reformat.size);
747 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
748 		action += DR_STE_ACTION_DOUBLE_SZ;
749 		allow_modify_hdr = false;
750 	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
751 		u8 *d_action;
752 
753 		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
754 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
755 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
756 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
757 		}
758 
759 		d_action = action + DR_STE_ACTION_SINGLE_SZ;
760 
761 		dr_ste_v1_set_encap_l3(last_ste,
762 				       action, d_action,
763 				       attr->reformat.id,
764 				       attr->reformat.size);
765 		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
766 		allow_modify_hdr = false;
767 	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
768 		/* Modify header, decap, and encap must use different STEs */
769 		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
770 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
771 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
772 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
773 		}
774 		dr_ste_v1_set_insert_hdr(last_ste, action,
775 					 attr->reformat.id,
776 					 attr->reformat.param_0,
777 					 attr->reformat.param_1,
778 					 attr->reformat.size);
779 		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
780 		action += DR_STE_ACTION_DOUBLE_SZ;
781 		allow_modify_hdr = false;
782 	} else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
783 		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
784 			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
785 			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
786 			action_sz = DR_STE_ACTION_TRIPLE_SZ;
787 			allow_modify_hdr = true;
788 			allow_ctr = true;
789 		}
790 		dr_ste_v1_set_remove_hdr(last_ste, action,
791 					 attr->reformat.param_0,
792 					 attr->reformat.param_1,
793 					 attr->reformat.size);
794 		action_sz -= DR_STE_ACTION_SINGLE_SZ;
795 		action += DR_STE_ACTION_SINGLE_SZ;
796 	}
797 
798 	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
799 	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
800 }
801 
/* Build a double SET modify-header action: write 'length' bits of
 * 'data' into destination dword 'hw_field' at bit position 'shifter'
 * (adjusted by the v1 QW offset).
 */
static void dr_ste_v1_set_action_set(u8 *d_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
815 
/* Encode a modify-header ADD double action: add @data to the @length-bit
 * header field selected by @hw_field/@shifter.
 */
static void dr_ste_v1_set_action_add(u8 *d_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	/* STE v1 addresses modify-header fields QW-based; bias the DW shifter */
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
829 
/* Encode a modify-header COPY double action: copy @dst_len bits from the
 * source field (@src_hw_field/@src_shifter) into the destination field
 * (@dst_hw_field/@dst_shifter).
 */
static void dr_ste_v1_set_action_copy(u8 *d_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	/* STE v1 addresses modify-header fields QW-based; bias both shifters */
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
846 
847 #define DR_STE_DECAP_L3_ACTION_NUM	8
848 #define DR_STE_L2_HDR_MAX_SZ		20
849 
850 static int dr_ste_v1_set_action_decap_l3_list(void *data,
851 					      u32 data_sz,
852 					      u8 *hw_action,
853 					      u32 hw_action_sz,
854 					      u16 *used_hw_action_num)
855 {
856 	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
857 	void *data_ptr = padded_data;
858 	u16 used_actions = 0;
859 	u32 inline_data_sz;
860 	u32 i;
861 
862 	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
863 		return -EINVAL;
864 
865 	inline_data_sz =
866 		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
867 
868 	/* Add an alignment padding  */
869 	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
870 
871 	/* Remove L2L3 outer headers */
872 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
873 		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
874 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
875 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
876 	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
877 		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
878 	hw_action += DR_STE_ACTION_DOUBLE_SZ;
879 	used_actions++; /* Remove and NOP are a single double action */
880 
881 	/* Point to the last dword of the header */
882 	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
883 
884 	/* Add the new header using inline action 4Byte at a time, the header
885 	 * is added in reversed order to the beginning of the packet to avoid
886 	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
887 	 * two bytes are padded and later removed.
888 	 */
889 	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
890 		void *addr_inline;
891 
892 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
893 			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
894 		/* The hardware expects here offset to words (2 bytes) */
895 		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
896 
897 		/* Copy bytes one by one to avoid endianness problem */
898 		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
899 					   hw_action, inline_data);
900 		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
901 		hw_action += DR_STE_ACTION_DOUBLE_SZ;
902 		used_actions++;
903 	}
904 
905 	/* Remove first 2 extra bytes */
906 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
907 		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
908 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
909 	/* The hardware expects here size in words (2 bytes) */
910 	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
911 	used_actions++;
912 
913 	*used_hw_action_num = used_actions;
914 
915 	return 0;
916 }
917 
/* Populate the ETHL2_SRC_DST STE bit mask from the match mask and consume
 * (zero) the mask fields that are fully expressed by this STE.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* ip_version is matched via the STE l3_type field; mask it all-ones */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	/* Only one of cvlan/svlan is consumed here; the tag builder relies
	 * on the same precedence (cvlan first).
	 */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
942 
943 static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
944 					      struct mlx5dr_ste_build *sb,
945 					      u8 *tag)
946 {
947 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
948 
949 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
950 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
951 
952 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
953 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
954 
955 	if (spec->ip_version == IP_VERSION_IPV4) {
956 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
957 		spec->ip_version = 0;
958 	} else if (spec->ip_version == IP_VERSION_IPV6) {
959 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
960 		spec->ip_version = 0;
961 	} else if (spec->ip_version) {
962 		return -EINVAL;
963 	}
964 
965 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
966 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
967 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
968 
969 	if (spec->cvlan_tag) {
970 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
971 		spec->cvlan_tag = 0;
972 	} else if (spec->svlan_tag) {
973 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
974 		spec->svlan_tag = 0;
975 	}
976 	return 0;
977 }
978 
979 static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
980 						struct mlx5dr_match_param *mask)
981 {
982 	dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
983 
984 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
985 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
986 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
987 }
988 
/* Fill the IPv6 destination-address STE tag (all 128 bits, 32b at a time).
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1002 
1003 static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
1004 						 struct mlx5dr_match_param *mask)
1005 {
1006 	dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
1007 
1008 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
1009 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1010 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
1011 }
1012 
/* Fill the IPv6 source-address STE tag (all 128 bits, 32b at a time).
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1026 
1027 static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
1028 						 struct mlx5dr_match_param *mask)
1029 {
1030 	dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
1031 
1032 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
1033 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1034 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
1035 }
1036 
/* Fill the IPv4 5-tuple STE tag: addresses, L4 ports (TCP or UDP share the
 * same tag fields), protocol, frag bit, DSCP/ECN and optional TCP flags.
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	/* TCP and UDP ports map onto the same STE fields */
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1061 
1062 static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
1063 						     struct mlx5dr_match_param *mask)
1064 {
1065 	dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
1066 
1067 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
1068 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1069 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
1070 }
1071 
/* Common bit-mask builder for the ETHL2 SRC/DST STEs: first VLAN, frag,
 * ethertype, l3 type and the second (QinQ) VLAN taken from misc.
 * Consumes (zeroes) the vlan-qualifier mask bits it expresses.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	/* NOTE(review): frag/ethertype placement inherited from v0 layout —
	 * the original "?" markers suggest this mapping was never verified.
	 */
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	/* Either vlan qualifier maps to the same all-ones mask */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1121 
/* Common tag builder for the ETHL2 SRC/DST STEs, mirroring
 * dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(). Consumes the ip_version
 * and vlan-qualifier fields it expresses.
 *
 * Return: 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Only v4/v6 (or unset) are expressible in l3_type */
		return -EINVAL;
	}

	/* cvlan takes precedence over svlan */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1179 
1180 static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
1181 						bool inner, u8 *bit_mask)
1182 {
1183 	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1184 
1185 	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
1186 	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
1187 
1188 	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1189 }
1190 
1191 static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
1192 					  struct mlx5dr_ste_build *sb,
1193 					  u8 *tag)
1194 {
1195 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1196 
1197 	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
1198 	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
1199 
1200 	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
1201 }
1202 
1203 static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1204 					    struct mlx5dr_match_param *mask)
1205 {
1206 	dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1207 
1208 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
1209 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1210 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
1211 }
1212 
1213 static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
1214 						bool inner, u8 *bit_mask)
1215 {
1216 	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
1217 
1218 	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
1219 	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
1220 
1221 	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
1222 }
1223 
1224 static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
1225 					  struct mlx5dr_ste_build *sb,
1226 					  u8 *tag)
1227 {
1228 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1229 
1230 	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
1231 	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
1232 
1233 	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
1234 }
1235 
1236 static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1237 					    struct mlx5dr_match_param *mask)
1238 {
1239 	dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1240 
1241 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
1242 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1243 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
1244 }
1245 
/* Populate the ETHL2_TNL STE bit mask: L2 fields plus the L2 tunneling
 * network id derived from the VXLAN VNI mask. Consumes vxlan_vni and the
 * vlan-qualifier mask bits.
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* The 24-bit VNI sits in the upper bits of the tunneling id */
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1273 
/* Fill the ETHL2_TNL STE tag, mirroring dr_ste_v1_build_eth_l2_tnl_bit_mask().
 * Consumes vxlan_vni, ip_version and the vlan-qualifier fields.
 *
 * Return: 0 on success, -EINVAL for an unsupported ip_version value.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* The 24-bit VNI sits in the upper bits of the tunneling id */
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* cvlan takes precedence over svlan */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		return -EINVAL;
	}

	return 0;
}
1315 
1316 static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1317 					    struct mlx5dr_match_param *mask)
1318 {
1319 	dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1320 
1321 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
1322 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1323 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
1324 }
1325 
/* Fill the IPv4 misc STE tag — currently only the TTL field.
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1336 
1337 static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1338 						  struct mlx5dr_match_param *mask)
1339 {
1340 	dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1341 
1342 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
1343 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1344 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
1345 }
1346 
/* Fill the ETHL4 STE tag: L4 ports (TCP/UDP share the same fields),
 * protocol, frag, DSCP/ECN, hop limit, IPv6 flow label and optional
 * TCP flags. Always succeeds.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* Flow label lives in misc, split per inner/outer header */
	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1376 
1377 static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1378 						struct mlx5dr_match_param *mask)
1379 {
1380 	dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1381 
1382 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1383 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1384 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1385 }
1386 
/* Fill the MPLS STE tag from the first inner/outer MPLS label in misc2.
 * Always succeeds.
 */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	/* DR_STE_SET_MPLS token-pastes the inner/outer field names */
	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1400 
1401 static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1402 				      struct mlx5dr_match_param *mask)
1403 {
1404 	dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1405 
1406 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1407 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1408 	sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1409 }
1410 
/* Fill the GRE STE tag: protocol, key halves and the C/K/S present bits.
 * Always succeeds.
 */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct  mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1427 
1428 static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1429 					 struct mlx5dr_match_param *mask)
1430 {
1431 	dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1432 
1433 	sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1434 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1435 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1436 }
1437 
/* Fill the tunnel-MPLS STE tag from the first outer MPLS label, taken
 * from the MPLS-over-GRE fields when those are masked, otherwise from
 * the MPLS-over-UDP fields. Always succeeds.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1472 
1473 static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1474 					  struct mlx5dr_match_param *mask)
1475 {
1476 	dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1477 
1478 	sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
1479 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1480 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
1481 }
1482 
1483 static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
1484 						 struct mlx5dr_ste_build *sb,
1485 						 u8 *tag)
1486 {
1487 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1488 	u8 *parser_ptr;
1489 	u8 parser_id;
1490 	u32 mpls_hdr;
1491 
1492 	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
1493 	misc2->outer_first_mpls_over_udp_label = 0;
1494 	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
1495 	misc2->outer_first_mpls_over_udp_exp = 0;
1496 	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
1497 	misc2->outer_first_mpls_over_udp_s_bos = 0;
1498 	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
1499 	misc2->outer_first_mpls_over_udp_ttl = 0;
1500 
1501 	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
1502 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1503 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1504 
1505 	return 0;
1506 }
1507 
1508 static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1509 						   struct mlx5dr_match_param *mask)
1510 {
1511 	dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1512 
1513 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1514 	 * flex parsers_{0-3}/{4-7} respectively.
1515 	 */
1516 	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1517 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1518 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1519 
1520 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1521 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
1522 }
1523 
1524 static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
1525 						 struct mlx5dr_ste_build *sb,
1526 						 u8 *tag)
1527 {
1528 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1529 	u8 *parser_ptr;
1530 	u8 parser_id;
1531 	u32 mpls_hdr;
1532 
1533 	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
1534 	misc2->outer_first_mpls_over_gre_label = 0;
1535 	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
1536 	misc2->outer_first_mpls_over_gre_exp = 0;
1537 	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
1538 	misc2->outer_first_mpls_over_gre_s_bos = 0;
1539 	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
1540 	misc2->outer_first_mpls_over_gre_ttl = 0;
1541 
1542 	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
1543 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1544 	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
1545 
1546 	return 0;
1547 }
1548 
1549 static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1550 						   struct mlx5dr_match_param *mask)
1551 {
1552 	dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1553 
1554 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1555 	 * flex parsers_{0-3}/{4-7} respectively.
1556 	 */
1557 	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1558 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1559 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1560 
1561 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1562 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
1563 }
1564 
1565 static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
1566 				    struct mlx5dr_ste_build *sb,
1567 				    u8 *tag)
1568 {
1569 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1570 	bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
1571 	u32 *icmp_header_data;
1572 	u8 *icmp_type;
1573 	u8 *icmp_code;
1574 
1575 	if (is_ipv4) {
1576 		icmp_header_data	= &misc3->icmpv4_header_data;
1577 		icmp_type		= &misc3->icmpv4_type;
1578 		icmp_code		= &misc3->icmpv4_code;
1579 	} else {
1580 		icmp_header_data	= &misc3->icmpv6_header_data;
1581 		icmp_type		= &misc3->icmpv6_type;
1582 		icmp_code		= &misc3->icmpv6_code;
1583 	}
1584 
1585 	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
1586 	MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
1587 	MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
1588 
1589 	*icmp_header_data = 0;
1590 	*icmp_type = 0;
1591 	*icmp_code = 0;
1592 
1593 	return 0;
1594 }
1595 
1596 static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1597 				      struct mlx5dr_match_param *mask)
1598 {
1599 	dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1600 
1601 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1602 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1603 	sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1604 }
1605 
1606 static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
1607 					       struct mlx5dr_ste_build *sb,
1608 					       u8 *tag)
1609 {
1610 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1611 
1612 	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1613 		       misc2, metadata_reg_a);
1614 
1615 	return 0;
1616 }
1617 
1618 static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1619 						 struct mlx5dr_match_param *mask)
1620 {
1621 	dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1622 
1623 	sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1624 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1625 	sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1626 }
1627 
1628 static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
1629 					   struct mlx5dr_ste_build *sb,
1630 					   u8 *tag)
1631 {
1632 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1633 
1634 	if (sb->inner) {
1635 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
1636 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
1637 	} else {
1638 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
1639 		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
1640 	}
1641 
1642 	return 0;
1643 }
1644 
1645 static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1646 					     struct mlx5dr_match_param *mask)
1647 {
1648 	dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1649 
1650 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1651 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1652 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1653 }
1654 
1655 static int
1656 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1657 					      struct mlx5dr_ste_build *sb,
1658 					      u8 *tag)
1659 {
1660 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1661 
1662 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1663 		       outer_vxlan_gpe_flags, misc3,
1664 		       outer_vxlan_gpe_flags);
1665 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1666 		       outer_vxlan_gpe_next_protocol, misc3,
1667 		       outer_vxlan_gpe_next_protocol);
1668 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1669 		       outer_vxlan_gpe_vni, misc3,
1670 		       outer_vxlan_gpe_vni);
1671 
1672 	return 0;
1673 }
1674 
1675 static void
1676 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1677 					       struct mlx5dr_match_param *mask)
1678 {
1679 	dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1680 
1681 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1682 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1683 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1684 }
1685 
1686 static int
1687 dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1688 					   struct mlx5dr_ste_build *sb,
1689 					   u8 *tag)
1690 {
1691 	struct mlx5dr_match_misc *misc = &value->misc;
1692 
1693 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1694 		       geneve_protocol_type, misc, geneve_protocol_type);
1695 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1696 		       geneve_oam, misc, geneve_oam);
1697 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1698 		       geneve_opt_len, misc, geneve_opt_len);
1699 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1700 		       geneve_vni, misc, geneve_vni);
1701 
1702 	return 0;
1703 }
1704 
1705 static void
1706 dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1707 					    struct mlx5dr_match_param *mask)
1708 {
1709 	dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1710 
1711 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1712 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1713 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1714 }
1715 
1716 static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
1717 					  struct mlx5dr_ste_build *sb,
1718 					  u8 *tag)
1719 {
1720 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1721 
1722 	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
1723 	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
1724 	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
1725 	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
1726 
1727 	return 0;
1728 }
1729 
1730 static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1731 					    struct mlx5dr_match_param *mask)
1732 {
1733 	dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1734 
1735 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1736 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1737 	sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1738 }
1739 
1740 static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
1741 					  struct mlx5dr_ste_build *sb,
1742 					  u8 *tag)
1743 {
1744 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1745 
1746 	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
1747 	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
1748 	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
1749 	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
1750 
1751 	return 0;
1752 }
1753 
1754 static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1755 					    struct mlx5dr_match_param *mask)
1756 {
1757 	dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1758 
1759 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1760 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1761 	sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1762 }
1763 
1764 static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
1765 						  u8 *bit_mask)
1766 {
1767 	struct mlx5dr_match_misc *misc_mask = &value->misc;
1768 
1769 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
1770 	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
1771 	misc_mask->source_eswitch_owner_vhca_id = 0;
1772 }
1773 
/* Build the source GVMI/QP tag: matches on the source SQ number and, when
 * the source_gvmi mask was set, on the GVMI of the source vport. The vport
 * is resolved from the domain (local or peer) that owns the given
 * eswitch_owner_vhca_id. Consumed spec fields are zeroed.
 * Returns 0 on success, -EINVAL on an unknown vhca_id or vport.
 */
static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;

	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		/* Consumed - must not be matched again by later builders */
		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Nothing more to do unless the caller masked on source_gvmi */
	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
		return 0;

	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
	if (!vport_cap) {
		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
			   misc->source_port);
		return -EINVAL;
	}

	/* Only write the tag when the vport has a GVMI (zero is skipped) */
	if (vport_cap->vport_gvmi)
		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);

	misc->source_port = 0;
	return 0;
}
1817 
1818 static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1819 					      struct mlx5dr_match_param *mask)
1820 {
1821 	dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1822 
1823 	sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
1824 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1825 	sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
1826 }
1827 
1828 static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
1829 				      u32 *misc4_field_value,
1830 				      bool *parser_is_used,
1831 				      u8 *tag)
1832 {
1833 	u32 id = *misc4_field_id;
1834 	u8 *parser_ptr;
1835 
1836 	if (parser_is_used[id])
1837 		return;
1838 
1839 	parser_is_used[id] = true;
1840 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
1841 
1842 	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
1843 	*misc4_field_id = 0;
1844 	*misc4_field_value = 0;
1845 }
1846 
1847 static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
1848 					   struct mlx5dr_ste_build *sb,
1849 					   u8 *tag)
1850 {
1851 	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
1852 	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
1853 
1854 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
1855 				  &misc_4_mask->prog_sample_field_value_0,
1856 				  parser_is_used, tag);
1857 
1858 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
1859 				  &misc_4_mask->prog_sample_field_value_1,
1860 				  parser_is_used, tag);
1861 
1862 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
1863 				  &misc_4_mask->prog_sample_field_value_2,
1864 				  parser_is_used, tag);
1865 
1866 	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
1867 				  &misc_4_mask->prog_sample_field_value_3,
1868 				  parser_is_used, tag);
1869 
1870 	return 0;
1871 }
1872 
1873 static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1874 					       struct mlx5dr_match_param *mask)
1875 {
1876 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1877 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1878 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1879 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1880 }
1881 
1882 static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1883 					       struct mlx5dr_match_param *mask)
1884 {
1885 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1886 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1887 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1888 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1889 }
1890 
1891 static int
1892 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
1893 						   struct mlx5dr_ste_build *sb,
1894 						   u8 *tag)
1895 {
1896 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1897 	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1898 	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1899 
1900 	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
1901 		 misc3->geneve_tlv_option_0_data);
1902 	misc3->geneve_tlv_option_0_data = 0;
1903 
1904 	return 0;
1905 }
1906 
1907 static void
1908 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1909 						    struct mlx5dr_match_param *mask)
1910 {
1911 	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1912 
1913 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1914 	 * flex parsers_{0-3}/{4-7} respectively.
1915 	 */
1916 	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1917 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1918 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1919 
1920 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1921 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
1922 }
1923 
1924 static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
1925 						    struct mlx5dr_ste_build *sb,
1926 						    u8 *tag)
1927 {
1928 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1929 
1930 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
1931 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
1932 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
1933 
1934 	return 0;
1935 }
1936 
1937 static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
1938 						      struct mlx5dr_match_param *mask)
1939 {
1940 	dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
1941 
1942 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1943 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1944 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
1945 }
1946 
1947 static int
1948 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
1949 					   struct mlx5dr_ste_build *sb,
1950 					   u8 *tag)
1951 {
1952 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
1953 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1954 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
1955 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1956 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
1957 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1958 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1959 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1960 	return 0;
1961 }
1962 
1963 static void
1964 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1965 					    struct mlx5dr_match_param *mask)
1966 {
1967 	dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
1968 
1969 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1970 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1971 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
1972 }
1973 
1974 static int
1975 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
1976 					   struct mlx5dr_ste_build *sb,
1977 					   u8 *tag)
1978 {
1979 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
1980 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1981 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
1982 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1983 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
1984 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1985 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1986 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1987 	return 0;
1988 }
1989 
1990 static void
1991 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1992 					    struct mlx5dr_match_param *mask)
1993 {
1994 	dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
1995 
1996 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1997 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1998 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
1999 }
2000 
/* STEv1 context: wires the STEv1-specific matcher builders, STE field
 * accessors and action setters into the generic SW steering code.
 */
struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	/* Capability flags advertising which actions STEv1 supports
	 * on the TX/RX paths (pop/push/encap).
	 */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_TX_POP |
					  DR_STE_CTX_ACTION_CAP_RX_PUSH |
					  DR_STE_CTX_ACTION_CAP_RX_ENCAP,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
2056