1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include "mlx5_ifc_dr_ste_v1.h"
6 #include "dr_ste.h"
7 
/* Pick the inner (_I) or outer (_O) variant of a definer lookup type */
#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
	((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
		   DR_STE_V1_LU_TYPE_##lookup_type##_O)
11 
/* STEv1 entry formats; the format is programmed into the STE's
 * entry_format field (see dr_ste_v1_set_entry_type()).
 */
enum dr_ste_v1_entry_format {
	DR_STE_V1_TYPE_BWC_BYTE	= 0x0,
	DR_STE_V1_TYPE_BWC_DW	= 0x1,
	DR_STE_V1_TYPE_MATCH	= 0x2,
};
17 
18 /* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
enum {
	/* _O/_I suffixes denote lookups on outer/inner packet headers */
	DR_STE_V1_LU_TYPE_NOP				= 0x0000,
	DR_STE_V1_LU_TYPE_ETHL2_TNL			= 0x0002,
	DR_STE_V1_LU_TYPE_IBL3_EXT			= 0x0102,
	DR_STE_V1_LU_TYPE_ETHL2_O			= 0x0003,
	DR_STE_V1_LU_TYPE_IBL4				= 0x0103,
	DR_STE_V1_LU_TYPE_ETHL2_I			= 0x0004,
	DR_STE_V1_LU_TYPE_SRC_QP_GVMI			= 0x0104,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_O			= 0x0005,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O		= 0x0105,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_I			= 0x0006,
	DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I		= 0x0106,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x0007,
	DR_STE_V1_LU_TYPE_IPV6_DES_O			= 0x0107,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x0008,
	DR_STE_V1_LU_TYPE_IPV6_DES_I			= 0x0108,
	DR_STE_V1_LU_TYPE_ETHL4_O			= 0x0009,
	DR_STE_V1_LU_TYPE_IPV6_SRC_O			= 0x0109,
	DR_STE_V1_LU_TYPE_ETHL4_I			= 0x000a,
	DR_STE_V1_LU_TYPE_IPV6_SRC_I			= 0x010a,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O		= 0x000b,
	DR_STE_V1_LU_TYPE_MPLS_O			= 0x010b,
	DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I		= 0x000c,
	DR_STE_V1_LU_TYPE_MPLS_I			= 0x010c,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x000d,
	DR_STE_V1_LU_TYPE_GRE				= 0x010d,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x000e,
	DR_STE_V1_LU_TYPE_GENERAL_PURPOSE		= 0x010e,
	DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x000f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0		= 0x010f,
	DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1		= 0x0110,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_0			= 0x0111,
	DR_STE_V1_LU_TYPE_FLEX_PARSER_1			= 0x0112,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_O			= 0x0113,
	DR_STE_V1_LU_TYPE_ETHL4_MISC_I			= 0x0114,
	DR_STE_V1_LU_TYPE_INVALID			= 0x00ff,
	DR_STE_V1_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
57 
/* Hardware header anchors: fixed reference points in the parsed packet,
 * used by insert/remove header actions as start/end boundaries.
 */
enum dr_ste_v1_header_anchors {
	DR_STE_HEADER_ANCHOR_START_OUTER		= 0x00,
	DR_STE_HEADER_ANCHOR_1ST_VLAN			= 0x02,
	DR_STE_HEADER_ANCHOR_IPV6_IPV4			= 0x07,
	DR_STE_HEADER_ANCHOR_INNER_MAC			= 0x13,
	DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4		= 0x19,
};
65 
/* Sizes (in bytes) of the STEv1 action formats */
enum dr_ste_v1_action_size {
	DR_STE_ACTION_SINGLE_SZ = 4,
	DR_STE_ACTION_DOUBLE_SZ = 8,
	DR_STE_ACTION_TRIPLE_SZ = 12,
};
71 
/* Attribute values for the insert-with-pointer action */
enum dr_ste_v1_action_insert_ptr_attr {
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
	DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
};
77 
/* Hardware action ids programmed into the action_id field of each
 * single/double action within an STE.
 */
enum dr_ste_v1_action_id {
	DR_STE_V1_ACTION_ID_NOP				= 0x00,
	DR_STE_V1_ACTION_ID_COPY			= 0x05,
	DR_STE_V1_ACTION_ID_SET				= 0x06,
	DR_STE_V1_ACTION_ID_ADD				= 0x07,
	DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE		= 0x08,
	DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER	= 0x09,
	DR_STE_V1_ACTION_ID_INSERT_INLINE		= 0x0a,
	DR_STE_V1_ACTION_ID_INSERT_POINTER		= 0x0b,
	DR_STE_V1_ACTION_ID_FLOW_TAG			= 0x0c,
	DR_STE_V1_ACTION_ID_QUEUE_ID_SEL		= 0x0d,
	DR_STE_V1_ACTION_ID_ACCELERATED_LIST		= 0x0e,
	DR_STE_V1_ACTION_ID_MODIFY_LIST			= 0x0f,
	DR_STE_V1_ACTION_ID_TRAILER			= 0x13,
	DR_STE_V1_ACTION_ID_COUNTER_ID			= 0x14,
	DR_STE_V1_ACTION_ID_MAX				= 0x21,
	/* use for special cases */
	DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3		= 0x22,
};
97 
/* Hardware dword offsets of the fields that modify-header actions
 * (set/add/copy) can target; used as destination_dw_offset values.
 */
enum {
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0		= 0x00,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1		= 0x01,
	DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2		= 0x02,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0		= 0x08,
	DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1		= 0x09,
	DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0		= 0x0e,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0		= 0x18,
	DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1		= 0x19,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0		= 0x40,
	DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1		= 0x41,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0	= 0x44,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1	= 0x45,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2	= 0x46,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3	= 0x47,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0	= 0x4c,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1	= 0x4d,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2	= 0x4e,
	DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3	= 0x4f,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0		= 0x5e,
	DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1		= 0x5f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0		= 0x6f,
	DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1		= 0x70,
	DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE	= 0x7b,
	DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE		= 0x7c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2		= 0x8c,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3		= 0x8d,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4		= 0x8e,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5		= 0x8f,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6		= 0x90,
	DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7		= 0x91,
};
130 
/* Translation table from SW steering modify-header fields
 * (MLX5_ACTION_IN_FIELD_*) to STEv1 hardware modify fields:
 * the hardware dword id, the bit range within that dword, and —
 * where a field only exists for a given protocol — the required
 * L3/L4 type.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	/* TTL and hop-limit share the same hw field, differing by l3_type */
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
		.hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
	},
};
258 
/* Program the STE's entry format (one of enum dr_ste_v1_entry_format) */
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
}
263 
264 static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
265 {
266 	u64 index = miss_addr >> 6;
267 
268 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
269 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
270 }
271 
272 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
273 {
274 	u64 index =
275 		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
276 		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
277 
278 	return index << 6;
279 }
280 
/* Program the STE byte mask used for partial tag matching */
static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
}
285 
/* Read the STE byte mask */
static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
}
290 
/* Program this STE's lookup type: high byte is the entry (definer) format,
 * low byte is the definer context index.
 */
static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
}
296 
/* Program the lookup type of the next STE in the chain; same high/low byte
 * split as dr_ste_v1_set_lu_type, written to the next-entry fields.
 */
static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
}
302 
303 static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
304 {
305 	u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
306 	u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
307 
308 	return (mode << 8 | index);
309 }
310 
/* Program the GVMI (vport identifier) of the hit (next) table */
static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
}
315 
316 static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
317 {
318 	u64 index = (icm_addr >> 5) | ht_size;
319 
320 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
321 	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
322 }
323 
/* Initialize a fresh STE: set its lookup type, mark the next lookup as
 * don't-care, and stamp the owner GVMI on the entry, next-table base and
 * miss address.
 * NOTE(review): entry_type is currently unused here — presumably kept for
 * interface parity with other STE versions; confirm against dr_ste.c callers.
 */
static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
			   u8 entry_type, u16 gvmi)
{
	dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
}
334 
335 static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
336 					   u32 ste_size)
337 {
338 	u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
339 	u8 *mask = tag + DR_STE_SIZE_TAG;
340 	u8 tmp_tag[DR_STE_SIZE_TAG] = {};
341 
342 	if (ste_size == DR_STE_SIZE_CTRL)
343 		return;
344 
345 	WARN_ON(ste_size != DR_STE_SIZE);
346 
347 	/* Backup tag */
348 	memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
349 
350 	/* Swap mask and tag  both are the same size */
351 	memcpy(tag, mask, DR_STE_SIZE_MASK);
352 	memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
353 }
354 
/* Build a single action that tags the RX flow with the given flow tag */
static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
{
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_FLOW_TAG);
	MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
}
361 
/* Attach a flow counter to the STE (counter_id lives in the STE control,
 * not in the action array).
 */
static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
}
366 
/* Request hardware re-parse of the packet after packet-modifying actions */
static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
{
	MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
371 
/* Build a double action that encapsulates the packet with a pre-programmed
 * reformat header referenced by @reformat_id. @size is in bytes.
 */
static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
				u32 reformat_id, int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
	dr_ste_v1_set_reparse(hw_ste_p);
}
384 
/* Build a double action that inserts a referenced header at @offset bytes
 * past the @anchor point (see enum dr_ste_v1_header_anchors).
 */
static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
				     u32 reformat_id,
				     u8 anchor, u8 offset,
				     int size)
{
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);

	/* The hardware expects here size and offset in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);

	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);

	dr_ste_v1_set_reparse(hw_ste_p);
}
404 
/* Build a double action that pushes a VLAN header (given inline as
 * @vlan_hdr) right after the L2 MAC addresses.
 */
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
				       u32 vlan_hdr)
{
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
	/* The hardware expects offset to vlan header in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 start_offset, HDR_LEN_L2_MACS >> 1);
	MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
		 inline_data, vlan_hdr);

	dr_ste_v1_set_reparse(hw_ste_p);
}
418 
/* Build a single action that strips @vlans_num VLAN headers, starting at
 * the first-VLAN anchor.
 */
static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
{
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
		 remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);

	dr_ste_v1_set_reparse(hw_ste_p);
}
431 
/* Build the two actions of an L3 encapsulation: first a single action that
 * removes everything up to the L3 header, then a double action that inserts
 * the referenced reformat header (which carries the new L2).
 */
static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
				   u8 *frst_s_action,
				   u8 *scnd_d_action,
				   u32 reformat_id,
				   int size)
{
	/* Remove L2 headers */
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_IPV6_IPV4);

	/* Encapsulate with given reformat ID */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
		 DR_STE_V1_ACTION_ID_INSERT_POINTER);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
	MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
		 DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);

	dr_ste_v1_set_reparse(hw_ste_p);
}
455 
/* Build a single action that decapsulates a tunneled packet: remove all
 * headers up to the inner MAC, copying the tunnel VNI to the CQE.
 */
static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
{
	MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_MAC);

	dr_ste_v1_set_reparse(hw_ste_p);
}
467 
/* Build a single action that executes a pre-written modify-header list:
 * @num_of_actions entries starting at ICM index @re_write_index.
 */
static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
					  u8 *s_action,
					  u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
		 DR_STE_V1_ACTION_ID_MODIFY_LIST);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
		 num_of_actions);
	MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
		 re_write_index);

	dr_ste_v1_set_reparse(hw_ste_p);
}
482 
/* Append a fresh don't-care MATCH STE to the STE array to gain more action
 * space. Advances *last_ste to the new STE, bumps *added_stes, and clears
 * the new STE's action area.
 */
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
					  u32 *added_stes,
					  u16 gvmi)
{
	u8 *action;

	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
	dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);

	action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
	memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
}
497 
/* Encode the requested TX actions into the STE array starting at @last_ste.
 *
 * Walks the action types in a hardware-mandated order, tracking the
 * remaining action budget (action_sz, in bytes) of the current STE.
 * Whenever an action does not fit — or must not share an STE with a
 * previous one (allow_encap) — a new MATCH STE is appended via
 * dr_ste_v1_arr_init_next_match(), which also bumps *added_stes.
 * Finally programs the hit GVMI and final hit address on the last STE.
 */
static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
				     u8 *action_type_set,
				     u8 *last_ste,
				     struct mlx5dr_ste_actions_attr *attr,
				     u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_encap = true;

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
						      attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1,
					      last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		/* Encap may not share an STE with modify header */
		allow_encap = false;
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
				dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
				action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
				action_sz = DR_STE_ACTION_TRIPLE_SZ;
				allow_encap = true;
			}
			dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
			action_sz -= DR_STE_ACTION_DOUBLE_SZ;
			action += DR_STE_ACTION_DOUBLE_SZ;
		}
	}

	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_encap = true;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		/* L3 encap needs a triple action; always start a new STE */
		dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
		action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
		action_sz = DR_STE_ACTION_TRIPLE_SZ;
		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		action += DR_STE_ACTION_TRIPLE_SZ;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
587 
/* Encode the requested RX actions into the STE array starting at @last_ste.
 *
 * Same budget/spill scheme as dr_ste_v1_set_actions_tx(): action_sz tracks
 * the remaining action bytes of the current STE, and a new MATCH STE is
 * appended when an action does not fit or is not allowed to share an STE
 * with an earlier one (allow_modify_hdr / allow_ctr flags). Finally
 * programs the hit GVMI and final hit address on the last STE.
 */
static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
				     u8 *action_type_set,
				     u8 *last_ste,
				     struct mlx5dr_ste_actions_attr *attr,
				     u32 *added_stes)
{
	u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
	u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
	bool allow_modify_hdr = true;
	bool allow_ctr = true;

	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->decap_actions,
					      attr->decap_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	} else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
		dr_ste_v1_set_rx_decap(last_ste, action);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
		allow_modify_hdr = false;
		allow_ctr = false;
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
		    !allow_modify_hdr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = false;
			allow_ctr = false;
		}

		dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
		action_sz -= DR_STE_ACTION_SINGLE_SZ;
		action += DR_STE_ACTION_SINGLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		/* Modify header and decapsulation must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = true;
		}
		dr_ste_v1_set_rewrite_actions(last_ste, action,
					      attr->modify_actions,
					      attr->modify_index);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
	}

	if (action_type_set[DR_ACTION_TYP_CTR]) {
		/* Counter action set after decap and before insert_hdr
		 * to exclude decaped / encaped header respectively.
		 */
		if (!allow_ctr) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
			allow_modify_hdr = true;
			allow_ctr = false;
		}
		dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
	}

	if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
		if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_encap(last_ste, action,
				    attr->reformat.id,
				    attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
		u8 *d_action;

		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}

		d_action = action + DR_STE_ACTION_SINGLE_SZ;

		dr_ste_v1_set_encap_l3(last_ste,
				       action, d_action,
				       attr->reformat.id,
				       attr->reformat.size);
		action_sz -= DR_STE_ACTION_TRIPLE_SZ;
		allow_modify_hdr = false;
	} else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
		/* Modify header, decap, and encap must use different STEs */
		if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
			action_sz = DR_STE_ACTION_TRIPLE_SZ;
		}
		dr_ste_v1_set_insert_hdr(last_ste, action,
					 attr->reformat.id,
					 attr->reformat.param_0,
					 attr->reformat.param_1,
					 attr->reformat.size);
		action_sz -= DR_STE_ACTION_DOUBLE_SZ;
		action += DR_STE_ACTION_DOUBLE_SZ;
		allow_modify_hdr = false;
	}

	dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
722 
/* Build a double SET modify action: write @length bits of @data into
 * @hw_field at bit position @shifter (offset by the v1 QW bias).
 */
static void dr_ste_v1_set_action_set(u8 *d_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
}
736 
/* Build a double ADD modify action: add @data to @length bits of @hw_field
 * at bit position @shifter (offset by the v1 QW bias).
 */
static void dr_ste_v1_set_action_add(u8 *d_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
	MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
	MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
}
750 
/* Build a double COPY modify action: copy @dst_len bits from the source
 * field/offset into the destination field/offset (both biased by the v1
 * QW offset).
 */
static void dr_ste_v1_set_action_copy(u8 *d_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
	MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
	MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
	MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
}
767 
#define DR_STE_DECAP_L3_ACTION_NUM	8
#define DR_STE_L2_HDR_MAX_SZ		20

/* Build the hw_action list for L3 decap with inner L2 rewrite:
 * remove the outer L2/L3 headers, then re-insert the new L2 header
 * (@data, @data_sz bytes) inline, and finally trim the alignment padding.
 *
 * Returns 0 on success, -EINVAL if @hw_action_sz cannot hold the
 * DR_STE_DECAP_L3_ACTION_NUM double actions this sequence may need.
 * *used_hw_action_num is set to the number of actions actually written.
 */
static int dr_ste_v1_set_action_decap_l3_list(void *data,
					      u32 data_sz,
					      u8 *hw_action,
					      u32 hw_action_sz,
					      u16 *used_hw_action_num)
{
	u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
	void *data_ptr = padded_data;
	u16 used_actions = 0;
	u32 inline_data_sz;
	u32 i;

	if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
		return -EINVAL;

	inline_data_sz =
		MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);

	/* Add an alignment padding  */
	memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);

	/* Remove L2L3 outer headers */
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
	MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
		 DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
	hw_action += DR_STE_ACTION_DOUBLE_SZ;
	used_actions++; /* Remove and NOP are a single double action */

	/* Point to the last dword of the header */
	data_ptr += (data_sz / inline_data_sz) * inline_data_sz;

	/* Add the new header using inline action 4Byte at a time, the header
	 * is added in reversed order to the beginning of the packet to avoid
	 * incorrect parsing by the HW. Since header is 14B or 18B an extra
	 * two bytes are padded and later removed.
	 */
	for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
		void *addr_inline;

		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
			 DR_STE_V1_ACTION_ID_INSERT_INLINE);
		/* The hardware expects here offset to words (2 bytes) */
		MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);

		/* Copy bytes one by one to avoid endianness problem */
		addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
					   hw_action, inline_data);
		memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
		hw_action += DR_STE_ACTION_DOUBLE_SZ;
		used_actions++;
	}

	/* Remove first 2 extra bytes */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
		 DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
	/* The hardware expects here size in words (2 bytes) */
	MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
	used_actions++;

	*used_hw_action_num = used_actions;

	return 0;
}
838 
/* Fill the STE bit mask for the ETHL2_SRC_DST lookup from the relevant
 * (inner/outer) match-param mask. Consumed mask fields are cleared by the
 * DR_STE_SET_* macros so the caller can detect unconsumed criteria.
 */
static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
						    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);

	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
	/* Any ip_version mask turns into an all-ones l3_type mask */
	DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);

	/* Only one of cvlan/svlan is consumed here; the other (if set)
	 * stays in the mask for other builders to report/consume.
	 */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
863 
864 static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
865 					      struct mlx5dr_ste_build *sb,
866 					      u8 *tag)
867 {
868 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
869 
870 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
871 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
872 
873 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
874 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
875 
876 	if (spec->ip_version == IP_VERSION_IPV4) {
877 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
878 		spec->ip_version = 0;
879 	} else if (spec->ip_version == IP_VERSION_IPV6) {
880 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
881 		spec->ip_version = 0;
882 	} else if (spec->ip_version) {
883 		return -EINVAL;
884 	}
885 
886 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
887 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
888 	DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
889 
890 	if (spec->cvlan_tag) {
891 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
892 		spec->cvlan_tag = 0;
893 	} else if (spec->svlan_tag) {
894 		MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
895 		spec->svlan_tag = 0;
896 	}
897 	return 0;
898 }
899 
900 static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
901 						struct mlx5dr_match_param *mask)
902 {
903 	dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
904 
905 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
906 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
907 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
908 }
909 
/* Build the IPv6 destination-address STE tag (all 128 bits, 32b chunks).
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
923 
924 static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
925 						 struct mlx5dr_match_param *mask)
926 {
927 	dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
928 
929 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
930 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
931 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
932 }
933 
/* Build the IPv6 source-address STE tag (all 128 bits, 32b chunks).
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
947 
948 static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
949 						 struct mlx5dr_match_param *mask)
950 {
951 	dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
952 
953 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
954 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
955 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
956 }
957 
/* Build the IPv4 5-tuple STE tag: addresses, L4 ports (TCP and UDP map to
 * the same tag fields), protocol, fragmentation, DSCP/ECN and TCP flags.
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
	/* TCP and UDP ports share the same STE port fields */
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
982 
983 static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
984 						     struct mlx5dr_match_param *mask)
985 {
986 	dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
987 
988 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
989 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
990 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
991 }
992 
/* Fill the common ETHL2 (src/dst shared) bit-mask fields: first VLAN,
 * fragmentation, ethertype, L3 type, and the second VLAN header taken
 * from the misc mask. Consumed mask fields are cleared.
 */
static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
						       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
	/* NOTE(review): frag/ethertype mappings mirror the tag builder below */
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
	/* Any ip_version mask turns into an all-ones l3_type mask */
	DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* Second VLAN header comes from the misc mask, inner/outer variant */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
1042 
/* Build the common ETHL2 (src/dst shared) tag fields: first VLAN, frag,
 * ethertype, L3 type and the second VLAN header (from misc, inner/outer
 * variant). Returns -EINVAL on an unsupported IP version.
 */
static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
						 bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Non-zero, non-v4/v6 version cannot be expressed in l3_type */
		return -EINVAL;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second VLAN header comes from the misc spec, inner/outer variant */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1100 
/* Fill the ETHL2_SRC bit mask: source MAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1111 
/* Build the ETHL2_SRC tag: source MAC plus the shared L2 fields.
 * Propagates the shared builder's return code.
 */
static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1123 
1124 static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
1125 					    struct mlx5dr_match_param *mask)
1126 {
1127 	dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1128 
1129 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
1130 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1131 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
1132 }
1133 
/* Fill the ETHL2 (dst) bit mask: destination MAC plus the shared L2 fields. */
static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1144 
/* Build the ETHL2 (dst) tag: destination MAC plus the shared L2 fields.
 * Propagates the shared builder's return code.
 */
static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1156 
1157 static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1158 					    struct mlx5dr_match_param *mask)
1159 {
1160 	dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
1161 
1162 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
1163 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1164 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
1165 }
1166 
/* Fill the ETHL2_TNL bit mask: destination MAC, first VLAN, frag,
 * ethertype, L3 type and the L2 tunneling network id (VXLAN VNI shifted
 * by 8 to its position in the 32-bit tunnel-id field).
 */
static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1194 
/* Build the ETHL2_TNL tag: destination MAC, first VLAN, frag, ethertype,
 * L3 type and the tunnel network id (VXLAN VNI << 8). Returns -EINVAL on
 * an unsupported IP version.
 */
static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);

	/* VNI occupies the upper bits of the tunnel-id field */
	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version == IP_VERSION_IPV4) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
	} else if (spec->ip_version == IP_VERSION_IPV6) {
		MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
	} else if (spec->ip_version) {
		/* Non-zero, non-v4/v6 version cannot be expressed in l3_type */
		return -EINVAL;
	}

	return 0;
}
1236 
1237 static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1238 					    struct mlx5dr_match_param *mask)
1239 {
1240 	dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1241 
1242 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
1243 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1244 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
1245 }
1246 
/* Build the IPv4-misc tag; only TTL is matched here. Always succeeds. */
static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1257 
1258 static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1259 						  struct mlx5dr_match_param *mask)
1260 {
1261 	dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1262 
1263 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
1264 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1265 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
1266 }
1267 
/* Build the ETHL4 tag: L4 ports (TCP/UDP share fields), protocol, frag,
 * DSCP/ECN, hop limit, IPv6 flow label (inner or outer from misc) and
 * TCP flags. Always succeeds.
 */
static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	if (sb->inner)
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1297 
1298 static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1299 						struct mlx5dr_match_param *mask)
1300 {
1301 	dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1302 
1303 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
1304 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1305 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
1306 }
1307 
/* Build the MPLS tag from the inner or outer misc2 MPLS fields.
 * Always succeeds.
 */
static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);

	return 0;
}
1321 
1322 static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
1323 				      struct mlx5dr_match_param *mask)
1324 {
1325 	dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
1326 
1327 	sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
1328 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1329 	sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
1330 }
1331 
/* Build the GRE tunnel tag: protocol, key halves and C/K/S present bits.
 * Always succeeds.
 */
static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct  mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
	DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
	DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1348 
1349 static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1350 					 struct mlx5dr_match_param *mask)
1351 {
1352 	dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1353 
1354 	sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
1355 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1356 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
1357 }
1358 
/* Build the tunnel-MPLS tag: chooses the MPLS-over-GRE or MPLS-over-UDP
 * misc2 fields based on which group is set in the match param.
 * Always succeeds.
 */
static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_gre_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_gre_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
			       misc2, outer_first_mpls_over_udp_label);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
			       misc2, outer_first_mpls_over_udp_exp);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1393 
1394 static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1395 					  struct mlx5dr_match_param *mask)
1396 {
1397 	dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1398 
1399 	sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
1400 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1401 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
1402 }
1403 
/* Build the MPLS-over-UDP tag: assemble the 32-bit MPLS header word from
 * the misc2 fields (clearing each as it is consumed) and write it
 * big-endian at the flex-parser slot assigned by FW caps. Always succeeds.
 */
static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	/* Write the header at the offset of the assigned flex parser */
	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1428 
1429 static void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
1430 						   struct mlx5dr_match_param *mask)
1431 {
1432 	dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
1433 
1434 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1435 	 * flex parsers_{0-3}/{4-7} respectively.
1436 	 */
1437 	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
1438 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1439 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1440 
1441 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1442 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
1443 }
1444 
/* Build the MPLS-over-GRE tag: assemble the 32-bit MPLS header word from
 * the misc2 fields (clearing each as it is consumed) and write it
 * big-endian at the flex-parser slot assigned by FW caps. Always succeeds.
 */
static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
						 struct mlx5dr_ste_build *sb,
						 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	/* Write the header at the offset of the assigned flex parser */
	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1469 
1470 static void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
1471 						   struct mlx5dr_match_param *mask)
1472 {
1473 	dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
1474 
1475 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1476 	 * flex parsers_{0-3}/{4-7} respectively.
1477 	 */
1478 	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
1479 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1480 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1481 
1482 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1483 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
1484 }
1485 
/* Build the ICMP tag: pick the v4 or v6 misc3 fields (based on which are
 * masked), write them to the shared ICMP tag fields, then clear the
 * consumed fields. Always succeeds.
 */
static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
	u32 *icmp_header_data;
	u8 *icmp_type;
	u8 *icmp_code;

	if (is_ipv4) {
		icmp_header_data	= &misc3->icmpv4_header_data;
		icmp_type		= &misc3->icmpv4_type;
		icmp_code		= &misc3->icmpv4_code;
	} else {
		icmp_header_data	= &misc3->icmpv6_header_data;
		icmp_type		= &misc3->icmpv6_type;
		icmp_code		= &misc3->icmpv6_code;
	}

	MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
	MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
	MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);

	/* Mark the fields as consumed */
	*icmp_header_data = 0;
	*icmp_type = 0;
	*icmp_code = 0;

	return 0;
}
1516 
1517 static void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
1518 				      struct mlx5dr_match_param *mask)
1519 {
1520 	dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
1521 
1522 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1523 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1524 	sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
1525 }
1526 
/* Build the general-purpose tag from metadata register A. Always succeeds. */
static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
					       struct mlx5dr_ste_build *sb,
					       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc2, metadata_reg_a);

	return 0;
}
1538 
1539 static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1540 						 struct mlx5dr_match_param *mask)
1541 {
1542 	dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
1543 
1544 	sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
1545 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1546 	sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
1547 }
1548 
/* Build the ETHL4_MISC tag: TCP sequence/ack numbers, inner or outer.
 * Always succeeds.
 */
static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}
1565 
1566 static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1567 					     struct mlx5dr_match_param *mask)
1568 {
1569 	dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1570 
1571 	sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
1572 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1573 	sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
1574 }
1575 
/* Build the VXLAN-GPE tunnel tag: flags, next protocol and VNI from misc3.
 * Always succeeds.
 */
static int
dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}
1595 
1596 static void
1597 dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1598 					       struct mlx5dr_match_param *mask)
1599 {
1600 	dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1601 
1602 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1603 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1604 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
1605 }
1606 
/* Copy the GENEVE tunnel header fields (protocol type, OAM bit, option
 * length, VNI) from the misc match parameters into the STE tag.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}
1625 
1626 static void
1627 dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1628 					    struct mlx5dr_match_param *mask)
1629 {
1630 	dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1631 
1632 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1633 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1634 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
1635 }
1636 
/* Map metadata registers c_0..c_3 into the high/low halves of steering
 * registers 0 and 1 of the STE tag.
 */
static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}
1650 
1651 static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
1652 					    struct mlx5dr_match_param *mask)
1653 {
1654 	dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
1655 
1656 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
1657 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1658 	sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
1659 }
1660 
/* Map metadata registers c_4..c_7 into the high/low halves of steering
 * registers 2 and 3 of the STE tag.
 */
static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}
1674 
1675 static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
1676 					    struct mlx5dr_match_param *mask)
1677 {
1678 	dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
1679 
1680 	sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
1681 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1682 	sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
1683 }
1684 
/* Build the bit mask for source GVMI/QP matching: a non-zero source_port
 * or source_sqn in the mask turns the corresponding STE field fully on.
 */
static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
						  u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
	/* Clear the consumed mask field; the vhca_id itself is handled by
	 * the tag builder (via sb->vhca_id_valid), not by the bit mask.
	 * NOTE(review): presumably sb->vhca_id_valid is set elsewhere from
	 * this field — confirm against the caller.
	 */
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1694 
1695 static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
1696 					    struct mlx5dr_ste_build *sb,
1697 					    u8 *tag)
1698 {
1699 	struct mlx5dr_match_misc *misc = &value->misc;
1700 	struct mlx5dr_cmd_vport_cap *vport_cap;
1701 	struct mlx5dr_domain *dmn = sb->dmn;
1702 	struct mlx5dr_cmd_caps *caps;
1703 	u8 *bit_mask = sb->bit_mask;
1704 
1705 	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
1706 
1707 	if (sb->vhca_id_valid) {
1708 		/* Find port GVMI based on the eswitch_owner_vhca_id */
1709 		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
1710 			caps = &dmn->info.caps;
1711 		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
1712 					   dmn->peer_dmn->info.caps.gvmi))
1713 			caps = &dmn->peer_dmn->info.caps;
1714 		else
1715 			return -EINVAL;
1716 
1717 		 misc->source_eswitch_owner_vhca_id = 0;
1718 	} else {
1719 		caps = &dmn->info.caps;
1720 	}
1721 
1722 	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
1723 		return 0;
1724 
1725 	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
1726 	if (!vport_cap) {
1727 		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
1728 			   misc->source_port);
1729 		return -EINVAL;
1730 	}
1731 
1732 	if (vport_cap->vport_gvmi)
1733 		MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
1734 
1735 	misc->source_port = 0;
1736 	return 0;
1737 }
1738 
1739 static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1740 					      struct mlx5dr_match_param *mask)
1741 {
1742 	dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1743 
1744 	sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
1745 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1746 	sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
1747 }
1748 
1749 static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
1750 				      u32 *misc4_field_value,
1751 				      bool *parser_is_used,
1752 				      u8 *tag)
1753 {
1754 	u32 id = *misc4_field_id;
1755 	u8 *parser_ptr;
1756 
1757 	if (parser_is_used[id])
1758 		return;
1759 
1760 	parser_is_used[id] = true;
1761 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
1762 
1763 	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
1764 	*misc4_field_id = 0;
1765 	*misc4_field_value = 0;
1766 }
1767 
/* Build the flex parser tag from all four misc4 programmable-sample
 * field id/value pairs. Duplicate parser ids are handled by
 * dr_ste_v1_set_flex_parser (first one wins).
 *
 * NOTE(review): "felx" is a typo for "flex", kept as-is since the name
 * is referenced by the flex_parser_{0,1} init functions.
 */
static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
				  &misc_4_mask->prog_sample_field_value_0,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
				  &misc_4_mask->prog_sample_field_value_1,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
				  &misc_4_mask->prog_sample_field_value_2,
				  parser_is_used, tag);

	dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
				  &misc_4_mask->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}
1793 
1794 static void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1795 					       struct mlx5dr_match_param *mask)
1796 {
1797 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1798 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1799 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1800 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1801 }
1802 
1803 static void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1804 					       struct mlx5dr_match_param *mask)
1805 {
1806 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1807 	dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
1808 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1809 	sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
1810 }
1811 
/* Write the GENEVE TLV option-0 data dword into the flex parser slot
 * selected by the device capabilities, then clear the consumed field.
 * NOTE(review): assumes caps->flex_parser_id_geneve_tlv_option_0 holds a
 * valid parser id for this device — confirm against capability setup.
 */
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}
1827 
1828 static void
1829 dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1830 						    struct mlx5dr_match_param *mask)
1831 {
1832 	dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1833 
1834 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1835 	 * flex parsers_{0-3}/{4-7} respectively.
1836 	 */
1837 	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1838 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
1839 		      DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1840 
1841 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1842 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
1843 }
1844 
1845 static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
1846 						    struct mlx5dr_ste_build *sb,
1847 						    uint8_t *tag)
1848 {
1849 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1850 
1851 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
1852 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
1853 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
1854 
1855 	return 0;
1856 }
1857 
1858 static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
1859 						      struct mlx5dr_match_param *mask)
1860 {
1861 	dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
1862 
1863 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1864 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1865 	sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
1866 }
1867 
1868 static int
1869 dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
1870 					   struct mlx5dr_ste_build *sb,
1871 					   uint8_t *tag)
1872 {
1873 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
1874 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1875 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
1876 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1877 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
1878 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1879 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1880 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1881 	return 0;
1882 }
1883 
1884 static void
1885 dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1886 					    struct mlx5dr_match_param *mask)
1887 {
1888 	dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
1889 
1890 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
1891 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1892 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
1893 }
1894 
1895 static int
1896 dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
1897 					   struct mlx5dr_ste_build *sb,
1898 					   uint8_t *tag)
1899 {
1900 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
1901 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1902 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
1903 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1904 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
1905 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1906 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1907 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1908 	return 0;
1909 }
1910 
1911 static void
1912 dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1913 					    struct mlx5dr_match_param *mask)
1914 {
1915 	dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
1916 
1917 	sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
1918 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1919 	sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
1920 }
1921 
/* STE context for HW steering entry format v1: wires the v1 builder,
 * getter/setter, action and send callbacks into the generic dr_ste layer.
 */
struct mlx5dr_ste_ctx ste_ctx_v1 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v1_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v1_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v1_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v1_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v1_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v1_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v1_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v1_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v1_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v1_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v1_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v1_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v1_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v1_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v1_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v1_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v1_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_register_0_init		= &dr_ste_v1_build_register_0_init,
	.build_register_1_init		= &dr_ste_v1_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v1_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v1_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v1_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
	.build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v1_init,
	.set_next_lu_type		= &dr_ste_v1_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v1_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v1_set_miss_addr,
	.get_miss_addr			= &dr_ste_v1_get_miss_addr,
	.set_hit_addr			= &dr_ste_v1_set_hit_addr,
	.set_byte_mask			= &dr_ste_v1_set_byte_mask,
	.get_byte_mask			= &dr_ste_v1_get_byte_mask,
	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_RX_ENCAP,
	.set_actions_rx			= &dr_ste_v1_set_actions_rx,
	.set_actions_tx			= &dr_ste_v1_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v1_action_modify_field_arr,
	.set_action_set			= &dr_ste_v1_set_action_set,
	.set_action_add			= &dr_ste_v1_set_action_add,
	.set_action_copy		= &dr_ste_v1_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v1_set_action_decap_l3_list,
	/* Send */
	.prepare_for_postsend		= &dr_ste_v1_prepare_for_postsend,
};
1975