1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
#define SVLAN_ETHERTYPE		0x88a8	/* 802.1ad S-VLAN TPID */
#define DR_STE_ENABLE_FLOW_TAG	BIT(31)	/* flag bit in qp_list_pointer enabling the flow tag */
10 
/* STEv0 entry types, written to the ste_general entry_type field */
enum dr_ste_v0_entry_type {
	DR_STE_TYPE_TX          = 1,
	DR_STE_TYPE_RX          = 2,
	DR_STE_TYPE_MODIFY_PKT  = 6,
};
16 
/* Values for the RX STE tunneling_action field */
enum dr_ste_v0_action_tunl {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
};
24 
/* Values for the TX STE action_type field */
enum dr_ste_v0_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
};
30 
/* Opcodes for modify-header (packet re-write) actions */
enum dr_ste_v0_action_mdfy_op {
	DR_STE_ACTION_MDFY_OP_COPY	= 0x1,
	DR_STE_ACTION_MDFY_OP_SET	= 0x2,
	DR_STE_ACTION_MDFY_OP_ADD	= 0x3,
};
36 
/* Select the inner (_I), RX (_D) or outer (_O) variant of a lookup type */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
		   (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
			  DR_STE_V0_LU_TYPE_##lookup_type##_O)
41 
/* STEv0 lookup types. Suffixes: _O = outer header, _I = inner header,
 * _D = the variant picked for RX by DR_STE_CALC_LU_TYPE.
 */
enum {
	DR_STE_V0_LU_TYPE_NOP				= 0x00,
	DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP		= 0x05,
	DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I		= 0x0a,
	DR_STE_V0_LU_TYPE_ETHL2_DST_O			= 0x06,
	DR_STE_V0_LU_TYPE_ETHL2_DST_I			= 0x07,
	DR_STE_V0_LU_TYPE_ETHL2_DST_D			= 0x1b,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_O			= 0x08,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_I			= 0x09,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_D			= 0x1c,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O		= 0x36,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I		= 0x37,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D		= 0x38,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O		= 0x0d,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I		= 0x0e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D		= 0x1e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O		= 0x0f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I		= 0x10,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D		= 0x1f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x11,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x12,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D		= 0x20,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x29,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x2a,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D		= 0x2b,
	DR_STE_V0_LU_TYPE_ETHL4_O			= 0x13,
	DR_STE_V0_LU_TYPE_ETHL4_I			= 0x14,
	DR_STE_V0_LU_TYPE_ETHL4_D			= 0x21,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_O			= 0x2c,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_I			= 0x2d,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_D			= 0x2e,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_O			= 0x15,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_I			= 0x24,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_D			= 0x25,
	DR_STE_V0_LU_TYPE_GRE				= 0x16,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_0			= 0x22,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_1			= 0x23,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x19,
	DR_STE_V0_LU_TYPE_GENERAL_PURPOSE		= 0x18,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0		= 0x2f,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1		= 0x30,
	DR_STE_V0_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
85 
/* Hw field codes addressable by STEv0 modify-header (set/add/copy) actions */
enum {
	DR_STE_V0_ACTION_MDFY_FLD_L2_0		= 0,
	DR_STE_V0_ACTION_MDFY_FLD_L2_1		= 1,
	DR_STE_V0_ACTION_MDFY_FLD_L2_2		= 2,
	DR_STE_V0_ACTION_MDFY_FLD_L3_0		= 3,
	DR_STE_V0_ACTION_MDFY_FLD_L3_1		= 4,
	DR_STE_V0_ACTION_MDFY_FLD_L3_2		= 5,
	DR_STE_V0_ACTION_MDFY_FLD_L3_3		= 6,
	DR_STE_V0_ACTION_MDFY_FLD_L3_4		= 7,
	DR_STE_V0_ACTION_MDFY_FLD_L4_0		= 8,
	DR_STE_V0_ACTION_MDFY_FLD_L4_1		= 9,
	DR_STE_V0_ACTION_MDFY_FLD_MPLS		= 10,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0	= 11,
	DR_STE_V0_ACTION_MDFY_FLD_REG_0		= 12,
	DR_STE_V0_ACTION_MDFY_FLD_REG_1		= 13,
	DR_STE_V0_ACTION_MDFY_FLD_REG_2		= 14,
	DR_STE_V0_ACTION_MDFY_FLD_REG_3		= 15,
	DR_STE_V0_ACTION_MDFY_FLD_L4_2		= 16,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_0	= 17,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_1	= 18,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_2	= 19,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_3	= 20,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1	= 21,
	DR_STE_V0_ACTION_MDFY_FLD_METADATA	= 22,
	DR_STE_V0_ACTION_MDFY_FLD_RESERVED	= 23,
};
112 
/* Map each MLX5_ACTION_IN_FIELD_* modify-header field to its STEv0 hw field
 * code and the bit range (start..end) it occupies within that hw field.
 * Entries with l3_type/l4_type only apply to the given L3/L4 header type.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
	},
};
234 
/* Write the STE entry type (one of enum dr_ste_v0_entry_type) */
static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
239 
/* Read back the STE entry type */
static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
244 
/* Program the STE miss address; the 64B-aligned ICM address is stored
 * right-shifted by 6, split across the 39_32 and 31_6 fields.
 */
static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* Miss address for TX and RX STEs located in the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
253 
254 static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
255 {
256 	u64 index =
257 		((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
258 		 ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
259 
260 	return index << 6;
261 }
262 
/* Write the STE byte mask */
static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
}
267 
/* Read back the STE byte mask */
static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, byte_mask);
}
272 
/* Write this STE's own lookup type (stored in entry_sub_type on v0) */
static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
}
277 
/* Write the lookup type of the next STE in the chain */
static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
}
282 
/* Read back the next-STE lookup type */
static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
}
287 
/* Set the GVMI of the hit destination (bits 63_48 of next table base) */
static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
292 
/* Program the hit (next table) address: the 32B-aligned ICM address is
 * packed with the hash table size in the low bits, then split across
 * the two next_table_base fields.
 */
static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}
300 
/* Initialize a fresh STE: entry type, its lookup type, a don't-care next
 * lookup type, and the GVMI in all three places it appears.
 */
static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
				enum dr_ste_v0_entry_type entry_type, u16 gvmi)
{
	dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
	dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
315 
316 static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
317 			   bool is_rx, u16 gvmi)
318 {
319 	enum dr_ste_v0_entry_type entry_type;
320 
321 	entry_type = is_rx ? DR_STE_TYPE_RX : DR_STE_TYPE_TX;
322 	dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi);
323 }
324 
/* Set the RX flow tag; the enable bit shares the qp_list_pointer field */
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
330 
/* Attach a flow counter to the STE, splitting the 24-bit counter id
 * across the two counter_trigger fields.
 */
static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
337 
/* Set the TX go_back bit (needed when combining reformat with push vlan) */
static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
342 
/* Configure a TX push-vlan action; @vlan_hdr is the raw VLAN header to
 * insert, @go_back must be set when a reformat follows in the chain.
 */
static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				       bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		dr_ste_v0_set_go_back_bit(hw_ste_p);
}
355 
356 static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
357 				   int size, bool encap_l3)
358 {
359 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
360 		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
361 	/* The hardware expects here size in words (2 byte) */
362 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
363 	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
364 }
365 
/* Configure an RX L2 tunnel decap; fail_on_error drops malformed packets */
static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
372 
/* Configure an RX pop-vlan action (one VLAN per STE) */
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}
378 
/* Configure an RX L3 tunnel decap; @vlan indicates the rebuilt inner
 * L2 header carries a VLAN (encoded in action_description).
 */
static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}
386 
/* Point a modify-packet STE at its re-write action list: how many actions
 * and where they start (@re_write_index).
 */
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}
395 
/* Advance *last_ste to the next STE in the array, count it in *added_stes,
 * and initialize it as a don't-care STE of @entry_type.
 */
static void dr_ste_v0_arr_init_next(u8 **last_ste,
				    u32 *added_stes,
				    enum dr_ste_v0_entry_type entry_type,
				    u16 gvmi)
{
	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
			    entry_type, gvmi);
}
406 
/* Encode the requested TX actions into one or more STEs, in hw-mandated
 * order: modify header, then push vlan(s), then encap. A new STE is
 * appended (via dr_ste_v0_arr_init_next, bumping *added_stes) whenever an
 * action cannot share the current STE. The last STE gets the counter and
 * the hit gvmi/address.
 */
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
		action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];

	/* We want to make sure the modify header comes before L2
	 * encapsulation. The reason for that is that we support
	 * modify headers for outer headers only
	 */
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		/* Each pushed VLAN needs its own STE; the first can reuse
		 * the current STE unless a modify header already occupies it.
		 */
		for (i = 0; i < attr->vlans.count; i++) {
			if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_TX,
							attr->gvmi);

			dr_ste_v0_set_tx_push_vlan(last_ste,
						   attr->vlans.headers[i],
						   encap);
		}
	}

	if (encap) {
		/* Modify header and encapsulation require a different STEs.
		 * Since modify header STE format doesn't support encapsulation
		 * tunneling_action.
		 */
		if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
		    action_type_set[DR_ACTION_TYP_PUSH_VLAN])
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_TX,
						attr->gvmi);

		dr_ste_v0_set_tx_encap(last_ste,
				       attr->reformat.id,
				       attr->reformat.size,
				       action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
		/* Whenever prio_tag_required enabled, we can be sure that the
		 * previous table (ACL) already push vlan to our packet,
		 * And due to HW limitation we need to set this bit, otherwise
		 * push vlan + reformat will not work.
		 */
		if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
			dr_ste_v0_set_go_back_bit(last_ste);
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
475 
/* Encode the requested RX actions into one or more STEs: counter, tunnel
 * decap (L2 or L3+rewrite), pop vlan(s), modify header, flow tag. A new
 * STE is appended (bumping *added_stes) whenever an action conflicts with
 * the current STE's entry type. The last STE gets the hit gvmi/address.
 */
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->decap_actions,
					      attr->decap_index);
	}

	if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
		dr_ste_v0_set_rx_decap(last_ste);

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		int i;

		/* Each popped VLAN needs its own STE; the first can reuse
		 * the current STE unless a decap already occupies it.
		 */
		for (i = 0; i < attr->vlans.count; i++) {
			if (i ||
			    action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
			    action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_RX,
							attr->gvmi);

			dr_ste_v0_set_rx_pop_vlan(last_ste);
		}
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		/* The current STE may already be a modify-packet STE (L3
		 * decap); if so, chain a fresh one for the header rewrite.
		 */
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_MODIFY_PKT,
						attr->gvmi);
		else
			dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);

		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		/* Flow tag lives in the RX steering format, so it cannot
		 * share a modify-packet STE.
		 */
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_RX,
						attr->gvmi);

		dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
	}

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
540 
541 static void dr_ste_v0_set_action_set(u8 *hw_action,
542 				     u8 hw_field,
543 				     u8 shifter,
544 				     u8 length,
545 				     u32 data)
546 {
547 	length = (length == 32) ? 0 : length;
548 	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
549 	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
550 	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
551 	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
552 	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
553 }
554 
555 static void dr_ste_v0_set_action_add(u8 *hw_action,
556 				     u8 hw_field,
557 				     u8 shifter,
558 				     u8 length,
559 				     u32 data)
560 {
561 	length = (length == 32) ? 0 : length;
562 	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
563 	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
564 	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
565 	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
566 	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
567 }
568 
/* Build one hw COPY action: copy @dst_len bits from @src_hw_field (at bit
 * @src_shifter) into @dst_hw_field (at bit @dst_shifter).
 */
static void dr_ste_v0_set_action_copy(u8 *hw_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
	MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
}
583 
/* Minimum number of hw SET actions needed to rebuild an inner L2 header */
#define DR_STE_DECAP_L3_MIN_ACTION_NUM	5

/* Build the hw SET action list that re-writes the inner L2 header for
 * L3 decap, from the raw header in @data (@data_sz distinguishes a plain
 * L2 header from one carrying a VLAN). Writes up to 6 actions into
 * @hw_action and reports the count via @used_hw_action_num.
 * Returns -ENOMEM if @hw_action_sz cannot hold the required actions.
 */
static int
dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
				   u8 *hw_action, u32 hw_action_sz,
				   u16 *used_hw_action_num)
{
	struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
	u32 hw_action_num;
	int required_actions;
	u32 hdr_fld_4b;
	u16 hdr_fld_2b;
	u16 vlan_type;
	bool vlan;

	/* Any size other than a bare L2 header implies a VLAN is present */
	vlan = (data_sz != HDR_LEN_L2);
	hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
	required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;

	if (hw_action_num < required_actions)
		return -ENOMEM;

	/* dmac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 16);
	hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
	hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
		      MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* dmac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* ethertype + (optional) vlan */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 32);
	if (!vlan) {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
	} else {
		/* Write the vlan qualifier together with the vlan tag */
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
		hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
	}
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* One extra action restores the inner ethertype after the vlan */
	if (vlan) {
		MLX5_SET(dr_action_hw_set, hw_action,
			 opcode, DR_STE_ACTION_MDFY_OP_SET);
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
		MLX5_SET(dr_action_hw_set, hw_action,
			 inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_length, 16);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_left_shifter, 0);
	}

	*used_hw_action_num = required_actions;

	return 0;
}
699 
/* Build the STE bit mask for the ETHL2_SRC_DST lookup from the match mask.
 * NOTE: DR_STE_SET_TAG and the explicit smac/vlan handling consume (zero)
 * the fields they translate in @value.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* The smac 47_16/15_0 split differs from the STE's 47_32/31_0 split */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	/* Either vlan qualifier fully masks the STE qualifier field */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
731 
/* Build the STE tag for the ETHL2_SRC_DST lookup from the match value.
 * Consumes (zeroes) the translated fields in @value; returns -EINVAL for
 * an unknown ip_version.
 */
static int
dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	/* The smac 47_16/15_0 split differs from the STE's 47_32/31_0 split */
	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
776 
/* Initialize an ETHL2_SRC_DST ste_build: bit mask, lookup type, byte mask
 * and the tag builder callback.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
}
787 
/* Build the STE tag for the IPv6 destination-address lookup; consumes the
 * translated dst_ip fields in @value. Always succeeds.
 */
static int
dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
802 
/* Initialize an ETHL3_IPV6_DST ste_build: the tag builder doubles as the
 * bit-mask builder here, run once on the mask.
 */
static void
dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
}
813 
/* Build the STE tag for the IPv6 source-address lookup; consumes the
 * translated src_ip fields in @value. Always succeeds.
 */
static int
dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
828 
/* Initialize an ETHL3_IPV6_SRC ste_build: the tag builder doubles as the
 * bit-mask builder here, run once on the mask.
 */
static void
dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
}
839 
/* Build the STE tag for the IPv4 5-tuple lookup (addresses, ports,
 * protocol, frag/dscp/ecn, tcp flags). TCP and UDP ports share the same
 * STE port fields. Consumes the translated fields in @value.
 */
static int
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
865 
/* Initialize an STE builder for the IPv4 5-tuple lookup type */
static void
dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	/* Consume the relevant mask fields into the STE bit mask */
	dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
}
876 
/* Build the bit mask for the L2 fields shared by the ETHL2 SRC and DST
 * lookups: first/second VLAN headers, fragmentation, ethertype and L3 type.
 * Consumed mask fields are zeroed so later builders don't match them again.
 */
static void
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	/* ip_version mask maps to a full l3_type mask, not a value copy */
	DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	/* Matching on either VLAN tag type means the qualifier is masked */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
927 
/* Build the tag for the L2 fields shared by the ETHL2 SRC and DST lookups.
 * Mirrors dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(): VLAN headers,
 * fragmentation, ethertype, L3 type. Returns -EINVAL on an unsupported
 * ip_version value; consumed spec fields are zeroed.
 */
static int
dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
				      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	/* Translate ip_version into the device's l3_type encoding */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
988 
/* Build the ETHL2 SRC bit mask: source MAC plus the shared L2 fields */
static void
dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}
1000 
/* Build the ETHL2 SRC tag: source MAC plus the shared L2 fields */
static int
dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1013 
/* Initialize an STE builder for the ETHL2 SRC lookup type */
static void
dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
}
1023 
/* Build the ETHL2 DST bit mask: destination MAC plus the shared L2 fields */
static void
dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
}
1036 
/* Build the ETHL2 DST tag: destination MAC plus the shared L2 fields */
static int
dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1049 
/* Initialize an STE builder for the ETHL2 DST lookup type */
static void
dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
}
1060 
/* Build the bit mask for the L2 tunneling lookup: destination MAC, first
 * VLAN, ethertype/L3 type, and the VXLAN VNI as the tunneling network id.
 */
static void
dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the network id field */
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1089 
/* Build the tag for the L2 tunneling lookup; mirrors
 * dr_ste_v0_build_eth_l2_tnl_bit_mask(). Returns -EINVAL on an
 * unsupported ip_version value.
 */
static int
dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* The VNI occupies the upper 24 bits of the network id field */
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Translate ip_version into the device's l3_type encoding */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
1134 
/* Initialize an STE builder for the L2 tunneling lookup type */
static void
dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
}
1145 
/* Build the tag for the IPv4 misc lookup (currently TTL only) */
static int
dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
				     struct mlx5dr_ste_build *sb,
				     u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1157 
/* Initialize an STE builder for the IPv4 misc lookup type */
static void
dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
}
1168 
/* Build the ETHL4 tag: L4 ports, protocol, fragmentation, DSCP/ECN,
 * hop limit, IPv6 flow label and optional TCP flags.
 */
static int
dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	/* TCP and UDP ports share the same tag fields; DR_STE_SET_TAG only
	 * writes when the spec value is non-zero, so at most one applies.
	 */
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	if (sb->inner)
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1199 
/* Initialize an STE builder for the ETHL4 lookup type */
static void
dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
}
1210 
/* Build the tag for the first MPLS label of the inner/outer header */
static int
dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
			 struct mlx5dr_ste_build *sb,
			 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls, misc2, outer, tag);

	return 0;
}
1225 
/* Initialize an STE builder for the first-MPLS-label lookup type */
static void
dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
}
1236 
/* Build the tag for GRE tunnel matching: protocol, key and C/K/S flags */
static int
dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
			    struct mlx5dr_ste_build *sb,
			    u8 *tag)
{
	struct  mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);

	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);

	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);

	return 0;
}
1256 
/* Initialize an STE builder for the GRE tunnel lookup type */
static void
dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
			     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
}
1267 
1268 static int
1269 dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
1270 			     struct mlx5dr_ste_build *sb,
1271 			     u8 *tag)
1272 {
1273 	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
1274 	u32 mpls_hdr;
1275 
1276 	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
1277 		mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
1278 		misc_2->outer_first_mpls_over_gre_label = 0;
1279 		mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
1280 		misc_2->outer_first_mpls_over_gre_exp = 0;
1281 		mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
1282 		misc_2->outer_first_mpls_over_gre_s_bos = 0;
1283 		mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
1284 		misc_2->outer_first_mpls_over_gre_ttl = 0;
1285 	} else {
1286 		mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
1287 		misc_2->outer_first_mpls_over_udp_label = 0;
1288 		mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
1289 		misc_2->outer_first_mpls_over_udp_exp = 0;
1290 		mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
1291 		misc_2->outer_first_mpls_over_udp_s_bos = 0;
1292 		mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
1293 		misc_2->outer_first_mpls_over_udp_ttl = 0;
1294 	}
1295 
1296 	MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
1297 	return 0;
1298 }
1299 
/* Initialize an STE builder for MPLS-over-tunnel matching via flex parser 0 */
static void
dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
}
1310 
/* Build the tag for the first MPLS label over UDP: pack label/exp/s-bos/ttl
 * into one MPLS header word and write it (big-endian) at the flex parser
 * slot advertised by the device caps.
 */
static int
dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1336 
/* Initialize an STE builder for MPLS-over-UDP matching; the lookup type
 * depends on which flex parser slot the device assigned.
 */
static void
dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
	/* STEs with lookup type FLEX_PARSER_{0/1} includes
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
}
1352 
/* Build the tag for the first MPLS label over GRE: pack label/exp/s-bos/ttl
 * into one MPLS header word and write it (big-endian) at the flex parser
 * slot advertised by the device caps.
 */
static int
dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}
1378 
/* Initialize an STE builder for MPLS-over-GRE matching; the lookup type
 * depends on which flex parser slot the device assigned.
 */
static void
dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} includes
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
}
1395 
1396 #define ICMP_TYPE_OFFSET_FIRST_DW	24
1397 #define ICMP_CODE_OFFSET_FIRST_DW	16
1398 
1399 static int
1400 dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
1401 			 struct mlx5dr_ste_build *sb,
1402 			 u8 *tag)
1403 {
1404 	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
1405 	u32 *icmp_header_data;
1406 	int dw0_location;
1407 	int dw1_location;
1408 	u8 *parser_ptr;
1409 	u8 *icmp_type;
1410 	u8 *icmp_code;
1411 	bool is_ipv4;
1412 	u32 icmp_hdr;
1413 
1414 	is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
1415 	if (is_ipv4) {
1416 		icmp_header_data	= &misc_3->icmpv4_header_data;
1417 		icmp_type		= &misc_3->icmpv4_type;
1418 		icmp_code		= &misc_3->icmpv4_code;
1419 		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
1420 		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
1421 	} else {
1422 		icmp_header_data	= &misc_3->icmpv6_header_data;
1423 		icmp_type		= &misc_3->icmpv6_type;
1424 		icmp_code		= &misc_3->icmpv6_code;
1425 		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
1426 		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
1427 	}
1428 
1429 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
1430 	icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
1431 		   (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
1432 	*(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
1433 	*icmp_code = 0;
1434 	*icmp_type = 0;
1435 
1436 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
1437 	*(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
1438 	*icmp_header_data = 0;
1439 
1440 	return 0;
1441 }
1442 
/* Initialize an STE builder for ICMPv4/ICMPv6 matching; the lookup type
 * depends on which flex parser slot the device assigned for DW0.
 */
static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask)
{
	u8 parser_id;
	bool is_ipv4;

	dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} includes
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
	parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
		    sb->caps->flex_parser_id_icmpv6_dw0;
	sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
}
1464 
/* Build the tag for matching on metadata register A (general purpose) */
static int
dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc_2, metadata_reg_a);

	return 0;
}
1477 
/* Initialize an STE builder for the general-purpose lookup type */
static void
dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
}
1488 
/* Build the tag for the L4 misc lookup: TCP sequence and ack numbers of
 * the inner or outer header, per sb->inner.
 */
static int
dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
				struct mlx5dr_ste_build *sb,
				u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}
1506 
/* Initialize an STE builder for the L4 misc lookup type */
static void
dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
}
1517 
/* Build the tag for VXLAN-GPE tunnel matching: flags, next protocol, VNI */
static int
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}
1537 
/* Initialize an STE builder for VXLAN-GPE tunnel matching */
static void
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
}
1547 
/* Build the tag for GENEVE tunnel matching: protocol, OAM, opt len, VNI */
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}
1566 
/* Initialize an STE builder for GENEVE tunnel matching */
static void
dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
}
1576 
/* Build the tag for matching on metadata registers C0-C3 */
static int
dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}
1591 
/* Initialize an STE builder for the steering-registers-0 lookup type */
static void
dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
}
1602 
/* Build the tag for matching on metadata registers C4-C7 */
static int
dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}
1617 
/* Initialize an STE builder for the steering-registers-1 lookup type */
static void
dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
}
1628 
/* Build the bit mask for source-GVMI/QP matching. Any mask on source_port
 * or source_sqn turns into a full-ones field mask; the vhca_id mask is
 * consumed here (handled by the tag builder via sb->vhca_id_valid).
 */
static void
dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
				      u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
1639 
/* Build the tag for source-GVMI/QP matching. Resolves the source vport's
 * GVMI either in the local domain or, when the match specifies a different
 * eswitch owner vhca_id, in the peer domain. Returns -EINVAL when the
 * vhca_id doesn't belong to a known domain or the vport is invalid.
 */
static int
dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	/* Only resolve the vport if the mask actually covers source_gvmi */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (source_gvmi_set) {
		vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
							misc->source_port);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
				   misc->source_port);
			return -EINVAL;
		}

		if (vport_cap->vport_gvmi)
			MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

		misc->source_port = 0;
	}

	return 0;
}
1687 
/* Initialize an STE builder for the source-GVMI/QP lookup type */
static void
dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
}
1698 
1699 static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
1700 				      u32 *misc4_field_value,
1701 				      bool *parser_is_used,
1702 				      u8 *tag)
1703 {
1704 	u32 id = *misc4_field_id;
1705 	u8 *parser_ptr;
1706 
1707 	if (parser_is_used[id])
1708 		return;
1709 
1710 	parser_is_used[id] = true;
1711 	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
1712 
1713 	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
1714 	*misc4_field_id = 0;
1715 	*misc4_field_value = 0;
1716 }
1717 
1718 static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
1719 					   struct mlx5dr_ste_build *sb,
1720 					   u8 *tag)
1721 {
1722 	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
1723 	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
1724 
1725 	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
1726 				  &misc_4_mask->prog_sample_field_value_0,
1727 				  parser_is_used, tag);
1728 
1729 	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
1730 				  &misc_4_mask->prog_sample_field_value_1,
1731 				  parser_is_used, tag);
1732 
1733 	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
1734 				  &misc_4_mask->prog_sample_field_value_2,
1735 				  parser_is_used, tag);
1736 
1737 	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
1738 				  &misc_4_mask->prog_sample_field_value_3,
1739 				  parser_is_used, tag);
1740 
1741 	return 0;
1742 }
1743 
1744 static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1745 					       struct mlx5dr_match_param *mask)
1746 {
1747 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1748 	dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
1749 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1750 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
1751 }
1752 
1753 static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1754 					       struct mlx5dr_match_param *mask)
1755 {
1756 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
1757 	dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
1758 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1759 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
1760 }
1761 
1762 static int
1763 dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
1764 						   struct mlx5dr_ste_build *sb,
1765 						   u8 *tag)
1766 {
1767 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1768 	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
1769 	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
1770 
1771 	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
1772 		 misc3->geneve_tlv_option_0_data);
1773 	misc3->geneve_tlv_option_0_data = 0;
1774 
1775 	return 0;
1776 }
1777 
1778 static void
1779 dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
1780 						    struct mlx5dr_match_param *mask)
1781 {
1782 	dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
1783 
1784 	/* STEs with lookup type FLEX_PARSER_{0/1} includes
1785 	 * flex parsers_{0-3}/{4-7} respectively.
1786 	 */
1787 	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
1788 		DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
1789 		DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1790 
1791 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1792 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
1793 }
1794 
1795 static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
1796 						    struct mlx5dr_ste_build *sb,
1797 						    u8 *tag)
1798 {
1799 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1800 
1801 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
1802 		       gtpu_msg_flags, misc3,
1803 		       gtpu_msg_flags);
1804 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
1805 		       gtpu_msg_type, misc3,
1806 		       gtpu_msg_type);
1807 	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
1808 		       gtpu_teid, misc3,
1809 		       gtpu_teid);
1810 
1811 	return 0;
1812 }
1813 
1814 static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
1815 						      struct mlx5dr_match_param *mask)
1816 {
1817 	dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
1818 
1819 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1820 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1821 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
1822 }
1823 
1824 static int
1825 dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
1826 					   struct mlx5dr_ste_build *sb,
1827 					   u8 *tag)
1828 {
1829 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
1830 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1831 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
1832 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1833 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
1834 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1835 	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1836 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1837 	return 0;
1838 }
1839 
1840 static void
1841 dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
1842 					    struct mlx5dr_match_param *mask)
1843 {
1844 	dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
1845 
1846 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1847 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1848 	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
1849 }
1850 
1851 static int
1852 dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
1853 					   struct mlx5dr_ste_build *sb,
1854 					   u8 *tag)
1855 {
1856 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
1857 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
1858 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
1859 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
1860 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
1861 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
1862 	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
1863 		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
1864 	return 0;
1865 }
1866 
1867 static void
1868 dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
1869 					    struct mlx5dr_match_param *mask)
1870 {
1871 	dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
1872 
1873 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
1874 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1875 	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
1876 }
1877 
/* STEv0 context: the full set of builder-init callbacks, STE field
 * accessors and action setters for devices using STE format v0.
 * Consumed by the generic dr_ste layer via this ops table.
 */
struct mlx5dr_ste_ctx ste_ctx_v0 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v0_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v0_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v0_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v0_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v0_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v0_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v0_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v0_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v0_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v0_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v0_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v0_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v0_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v0_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v0_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v0_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v0_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_register_0_init		= &dr_ste_v0_build_register_0_init,
	.build_register_1_init		= &dr_ste_v0_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v0_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v0_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v0_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
	.build_tnl_gtpu_flex_parser_0_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v0_init,
	.set_next_lu_type		= &dr_ste_v0_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v0_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v0_set_miss_addr,
	.get_miss_addr			= &dr_ste_v0_get_miss_addr,
	.set_hit_addr			= &dr_ste_v0_set_hit_addr,
	.set_byte_mask			= &dr_ste_v0_set_byte_mask,
	.get_byte_mask			= &dr_ste_v0_get_byte_mask,

	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_NONE,
	.set_actions_rx			= &dr_ste_v0_set_actions_rx,
	.set_actions_tx			= &dr_ste_v0_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v0_action_modify_field_arr,
	.set_action_set			= &dr_ste_v0_set_action_set,
	.set_action_add			= &dr_ste_v0_set_action_add,
	.set_action_copy		= &dr_ste_v0_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v0_set_action_decap_l3_list,
};
1930