1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
8 #define SVLAN_ETHERTYPE		0x88a8
9 #define DR_STE_ENABLE_FLOW_TAG	BIT(31)
10 
/* Values written to the RX STE tunneling_action field */
enum dr_ste_v0_action_tunl {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
};
18 
/* Values written to the TX STE action_type field */
enum dr_ste_v0_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
};
24 
/* Opcodes for modify-header actions (dr_action_hw_set/dr_action_hw_copy) */
enum dr_ste_v0_action_mdfy_op {
	DR_STE_ACTION_MDFY_OP_COPY	= 0x1,
	DR_STE_ACTION_MDFY_OP_SET	= 0x2,
	DR_STE_ACTION_MDFY_OP_ADD	= 0x3,
};
30 
/* Pick the lookup type variant for a builder: the _I variant for inner
 * headers, otherwise the _D variant when on the RX path, else the _O
 * (outer) variant.
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
		   (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
			  DR_STE_V0_LU_TYPE_##lookup_type##_O)
35 
/* STEv0 lookup types. The _O/_I/_D suffixes are the outer/inner/rx
 * variants selected by DR_STE_CALC_LU_TYPE().
 */
enum {
	DR_STE_V0_LU_TYPE_NOP				= 0x00,
	DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP		= 0x05,
	DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I		= 0x0a,
	DR_STE_V0_LU_TYPE_ETHL2_DST_O			= 0x06,
	DR_STE_V0_LU_TYPE_ETHL2_DST_I			= 0x07,
	DR_STE_V0_LU_TYPE_ETHL2_DST_D			= 0x1b,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_O			= 0x08,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_I			= 0x09,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_D			= 0x1c,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O		= 0x36,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I		= 0x37,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D		= 0x38,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O		= 0x0d,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I		= 0x0e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D		= 0x1e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O		= 0x0f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I		= 0x10,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D		= 0x1f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x11,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x12,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D		= 0x20,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x29,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x2a,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D		= 0x2b,
	DR_STE_V0_LU_TYPE_ETHL4_O			= 0x13,
	DR_STE_V0_LU_TYPE_ETHL4_I			= 0x14,
	DR_STE_V0_LU_TYPE_ETHL4_D			= 0x21,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_O			= 0x2c,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_I			= 0x2d,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_D			= 0x2e,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_O			= 0x15,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_I			= 0x24,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_D			= 0x25,
	DR_STE_V0_LU_TYPE_GRE				= 0x16,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_0			= 0x22,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_1			= 0x23,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x19,
	DR_STE_V0_LU_TYPE_GENERAL_PURPOSE		= 0x18,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0		= 0x2f,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1		= 0x30,
	DR_STE_V0_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};
79 
/* Hardware field codes used as destination/source of modify-header
 * SET/ADD/COPY actions (see dr_ste_v0_action_modify_field_arr below).
 */
enum {
	DR_STE_V0_ACTION_MDFY_FLD_L2_0		= 0,
	DR_STE_V0_ACTION_MDFY_FLD_L2_1		= 1,
	DR_STE_V0_ACTION_MDFY_FLD_L2_2		= 2,
	DR_STE_V0_ACTION_MDFY_FLD_L3_0		= 3,
	DR_STE_V0_ACTION_MDFY_FLD_L3_1		= 4,
	DR_STE_V0_ACTION_MDFY_FLD_L3_2		= 5,
	DR_STE_V0_ACTION_MDFY_FLD_L3_3		= 6,
	DR_STE_V0_ACTION_MDFY_FLD_L3_4		= 7,
	DR_STE_V0_ACTION_MDFY_FLD_L4_0		= 8,
	DR_STE_V0_ACTION_MDFY_FLD_L4_1		= 9,
	DR_STE_V0_ACTION_MDFY_FLD_MPLS		= 10,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0	= 11,
	DR_STE_V0_ACTION_MDFY_FLD_REG_0		= 12,
	DR_STE_V0_ACTION_MDFY_FLD_REG_1		= 13,
	DR_STE_V0_ACTION_MDFY_FLD_REG_2		= 14,
	DR_STE_V0_ACTION_MDFY_FLD_REG_3		= 15,
	DR_STE_V0_ACTION_MDFY_FLD_L4_2		= 16,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_0	= 17,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_1	= 18,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_2	= 19,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_3	= 20,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1	= 21,
	DR_STE_V0_ACTION_MDFY_FLD_METADATA	= 22,
	DR_STE_V0_ACTION_MDFY_FLD_RESERVED	= 23,
};
106 
/* Translation table from MLX5_ACTION_IN_FIELD_* (FW modify-header field
 * ids) to the STEv0 hardware field code plus the [start, end] bit range
 * inside that field. Entries with .l3_type/.l4_type only apply when the
 * packet's L3/L4 header matches that type.
 */
static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
	},
};
228 
/* Set the STE entry type (e.g. RX, TX, modify-packet) in the STE header */
static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
233 
/* Read back the STE entry type from the STE header */
static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
238 
/* Program the ICM address of the miss table. The address is 64-byte
 * aligned (hence >> 6) and split across two hardware fields.
 */
static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* Miss address for TX and RX STEs located in the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
247 
248 static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
249 {
250 	u64 index =
251 		((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
252 		 ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
253 
254 	return index << 6;
255 }
256 
/* Write the per-byte mask used for matching on this STE */
static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
}
261 
/* Read back the per-byte match mask of this STE */
static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, byte_mask);
}
266 
/* Set this STE's own lookup type; on STEv0 it lives in entry_sub_type */
static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
}
271 
/* Set the lookup type of the next table in the chain */
static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
}
276 
/* Read back the lookup type of the next table in the chain */
static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
}
281 
/* On hit, the destination GVMI is encoded in bits 63_48 of the next
 * table base address.
 */
static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
286 
287 static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
288 {
289 	u64 index = (icm_addr >> 5) | ht_size;
290 
291 	MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
292 	MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
293 }
294 
/* Initialize a fresh STE: entry type, its own lookup type, a don't-care
 * next lookup type, and the GVMI in all three locations that carry it.
 */
static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
			   u8 entry_type, u16 gvmi)
{
	dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
	dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
309 
/* Attach a flow tag to matching packets; bit 31 of qp_list_pointer
 * enables flow tagging.
 */
static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
315 
/* Bind a 24-bit flow counter id to this STE, split over two fields */
static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
322 
/* Set the TX STE go_back bit (needed when combining push-vlan/reformat) */
static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
327 
/* Program a push-vlan action on a TX STE with the given raw VLAN header */
static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				       bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		dr_ste_v0_set_go_back_bit(hw_ste_p);
}
340 
341 static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
342 				   int size, bool encap_l3)
343 {
344 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
345 		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
346 	/* The hardware expects here size in words (2 byte) */
347 	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
348 	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
349 }
350 
/* Program an L2 tunnel decap action on an RX STE */
static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
}
356 
/* Program a pop-vlan action on an RX STE */
static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}
362 
/* Program an L3 tunnel decap on an RX STE; action_description flags
 * whether the rewritten L2 header carries a VLAN.
 */
static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
369 
/* Point a modify-packet STE at a pre-written list of rewrite actions
 * in ICM and record how many actions it contains.
 */
static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}
378 
379 static void dr_ste_v0_arr_init_next(u8 **last_ste,
380 				    u32 *added_stes,
381 				    enum mlx5dr_ste_entry_type entry_type,
382 				    u16 gvmi)
383 {
384 	(*added_stes)++;
385 	*last_ste += DR_STE_SIZE;
386 	dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
387 		       entry_type, gvmi);
388 }
389 
/* Program the TX-side actions into the STE array, starting at last_ste.
 * Actions that cannot share one STE format cause a new STE to be
 * appended (bumping *added_stes). Finally sets the hit GVMI/address.
 */
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
		action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];

	/* We want to make sure the modify header comes before L2
	 * encapsulation. The reason for that is that we support
	 * modify headers for outer headers only
	 */
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			/* Each pushed VLAN needs its own STE, and push-vlan
			 * cannot share a STE with a modify-header action.
			 */
			if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							MLX5DR_STE_TYPE_TX,
							attr->gvmi);

			dr_ste_v0_set_tx_push_vlan(last_ste,
						   attr->vlans.headers[i],
						   encap);
		}
	}

	if (encap) {
		/* Modify header and encapsulation require different STEs,
		 * since the modify header STE format doesn't support the
		 * encapsulation tunneling_action.
		 */
		if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
		    action_type_set[DR_ACTION_TYP_PUSH_VLAN])
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						MLX5DR_STE_TYPE_TX,
						attr->gvmi);

		dr_ste_v0_set_tx_encap(last_ste,
				       attr->reformat_id,
				       attr->reformat_size,
				       action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
		/* Whenever prio_tag_required enabled, we can be sure that the
		 * previous table (ACL) already push vlan to our packet,
		 * And due to HW limitation we need to set this bit, otherwise
		 * push vlan + reformat will not work.
		 */
		if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
			dr_ste_v0_set_go_back_bit(last_ste);
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	/* Terminate the chain: destination GVMI and final hit address */
	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
458 
/* Program the RX-side actions into the STE array, starting at last_ste.
 * Incompatible action combinations get additional STEs appended
 * (bumping *added_stes). Finally sets the hit GVMI/address.
 */
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	/* L3 decap rewrites the L2 header, so it uses a modify-packet STE
	 * with a pre-built rewrite-action list.
	 */
	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->decap_actions,
					      attr->decap_index);
	}

	if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
		dr_ste_v0_set_rx_decap(last_ste);

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			/* Pop-vlan cannot share a STE with a decap action,
			 * and each popped VLAN needs its own STE.
			 */
			if (i ||
			    action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
			    action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							MLX5DR_STE_TYPE_RX,
							attr->gvmi);

			dr_ste_v0_set_rx_pop_vlan(last_ste);
		}
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		/* If the current STE is already a modify-packet STE (used
		 * by L3 decap), the rewrite list must go on a fresh one.
		 */
		if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						MLX5DR_STE_TYPE_MODIFY_PKT,
						attr->gvmi);
		else
			dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);

		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		/* Flow tag is an RX-STE field; can't live on a modify STE */
		if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						MLX5DR_STE_TYPE_RX,
						attr->gvmi);

		dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
	}

	/* Terminate the chain: destination GVMI and final hit address */
	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
523 
524 static void dr_ste_v0_set_action_set(u8 *hw_action,
525 				     u8 hw_field,
526 				     u8 shifter,
527 				     u8 length,
528 				     u32 data)
529 {
530 	length = (length == 32) ? 0 : length;
531 	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
532 	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
533 	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
534 	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
535 	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
536 }
537 
538 static void dr_ste_v0_set_action_add(u8 *hw_action,
539 				     u8 hw_field,
540 				     u8 shifter,
541 				     u8 length,
542 				     u32 data)
543 {
544 	length = (length == 32) ? 0 : length;
545 	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
546 	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
547 	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
548 	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
549 	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
550 }
551 
/* Emit a modify-header COPY action: copy dst_len bits from
 * src_hw_field/src_shifter into dst_hw_field/dst_shifter.
 */
static void dr_ste_v0_set_action_copy(u8 *hw_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
	MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
}
566 
/* Minimum SET actions to rebuild an L2 header: dmac hi/lo, smac hi/lo,
 * ethertype; +1 more when the header carries a VLAN.
 */
#define DR_STE_DECAP_L3_MIN_ACTION_NUM	5

/* Build the modify-header action list that reconstructs the inner L2
 * header after an L3 tunnel decap.
 * @data:               raw L2 header to restore (mlx5_ifc_l2_hdr_bits)
 * @data_sz:            header size; != HDR_LEN_L2 implies a VLAN present
 * @hw_action:          output buffer for dr_action_hw_set entries
 * @hw_action_sz:       output buffer size in bytes
 * @used_hw_action_num: out: number of actions written
 * Returns 0 on success, -ENOMEM if the buffer can't fit all actions.
 */
static int
dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
				   u8 *hw_action, u32 hw_action_sz,
				   u16 *used_hw_action_num)
{
	struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
	u32 hw_action_num;
	int required_actions;
	u32 hdr_fld_4b;
	u16 hdr_fld_2b;
	u16 vlan_type;
	bool vlan;

	vlan = (data_sz != HDR_LEN_L2);
	hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
	required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;

	if (hw_action_num < required_actions)
		return -ENOMEM;

	/* dmac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 16);
	hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
	/* Reassemble smac bits 47_16 from the two header words */
	hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
		      MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* dmac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* ethertype + (optional) vlan */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 32);
	if (!vlan) {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
	} else {
		/* Write the vlan qualifier together with the vlan TCI */
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
		hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
	}
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	if (vlan) {
		/* Extra action: restore the inner ethertype after the vlan */
		MLX5_SET(dr_action_hw_set, hw_action,
			 opcode, DR_STE_ACTION_MDFY_OP_SET);
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
		MLX5_SET(dr_action_hw_set, hw_action,
			 inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_length, 16);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_left_shifter, 0);
	}

	*used_hw_action_num = required_actions;

	return 0;
}
682 
/* Build the bit mask for the ETHL2_SRC_DST lookup. Consumed match
 * fields are zeroed in *value so later builders don't match them again.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* The STE splits smac at bit 32 rather than at bit 16 */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	/* Any vlan-tag match masks the whole vlan qualifier field */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
714 
/* Build the match tag for the ETHL2_SRC_DST lookup from *value.
 * Consumed spec fields are zeroed. Returns -EINVAL on an unsupported
 * ip_version value.
 */
static int
dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	/* The STE splits smac at bit 32 rather than at bit 16 */
	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			/* Only IPv4/IPv6 can be encoded in l3_type */
			return -EINVAL;
		}
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}
759 
/* Initialize the ETHL2_SRC_DST builder: fill the bit mask from *mask
 * and record the lookup type, byte mask and tag-building callback.
 */
static void
dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
}
770 
/* Build the match tag for the IPv6 destination-address lookup */
static int
dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
785 
/* Initialize the IPv6 destination-address builder; the tag function is
 * also used to populate the bit mask.
 */
static void
dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
}
796 
/* Build the match tag for the IPv6 source-address lookup */
static int
dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
811 
/* Initialize the IPv6 source-address builder; the tag function is also
 * used to populate the bit mask.
 */
static void
dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
}
822 
/* Build the match tag for the IPv4 5-tuple lookup (addresses, ports,
 * protocol, frag/dscp/ecn, and optionally TCP flags).
 */
static int
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	/* TCP and UDP ports share the same STE port fields */
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
848 
849 static void
850 dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
851 					 struct mlx5dr_match_param *mask)
852 {
853 	dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
854 
855 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
856 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
857 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
858 }
859 
/* Fill the bit mask common to the L2 source and L2 destination lookups:
 * first VLAN header, second (QinQ) VLAN header, fragment bit, ethertype
 * and L3 type. Mask fields fully consumed here (the VLAN tag qualifier
 * flags) are cleared so they are not matched again by another builder.
 */
static void
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	/* Matching on either VLAN tag type enables the full qualifier mask */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* Second VLAN (QinQ) masks live in the misc parameters and are
	 * inner/outer specific.
	 */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}
910 
/* Build the tag fields shared by the L2 source and L2 destination
 * lookups: first/second VLAN headers, fragment bit, ethertype and L3
 * type. Spec/misc fields consumed here are cleared after use.
 * Returns -EINVAL if ip_version is set to an unknown value.
 */
static int
dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
				      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	/* ip_version is translated to the hardware l3_type encoding */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	/* cvlan/svlan flags are translated to the VLAN qualifier encoding */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second VLAN (QinQ) values live in the misc parameters and are
	 * inner/outer specific.
	 */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
971 
972 static void
973 dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
974 				    bool inner, u8 *bit_mask)
975 {
976 	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
977 
978 	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
979 	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
980 
981 	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
982 }
983 
984 static int
985 dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
986 			       struct mlx5dr_ste_build *sb,
987 			       u8 *tag)
988 {
989 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
990 
991 	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
992 	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
993 
994 	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
995 }
996 
997 static void
998 dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
999 				struct mlx5dr_match_param *mask)
1000 {
1001 	dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
1002 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
1003 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1004 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
1005 }
1006 
1007 static void
1008 dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
1009 				    struct mlx5dr_ste_build *sb,
1010 				    u8 *bit_mask)
1011 {
1012 	struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
1013 
1014 	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
1015 	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
1016 
1017 	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
1018 }
1019 
1020 static int
1021 dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
1022 			       struct mlx5dr_ste_build *sb,
1023 			       u8 *tag)
1024 {
1025 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1026 
1027 	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
1028 	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
1029 
1030 	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
1031 }
1032 
1033 static void
1034 dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
1035 				struct mlx5dr_match_param *mask)
1036 {
1037 	dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
1038 
1039 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
1040 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1041 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
1042 }
1043 
/* Fill the bit mask for the L2 tunneling lookup: DMAC, first VLAN,
 * fragment bit, ethertype/L3 type and the VXLAN VNI. Mask fields fully
 * consumed here (vxlan_vni, VLAN tag qualifiers) are cleared.
 */
static void
dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* VNI is shifted into the upper bits of the tunneling
		 * network id field (low byte is not the VNI).
		 */
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* Matching on either VLAN tag type enables the full qualifier mask */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1072 
/* Build the L2 tunneling tag: DMAC, first VLAN, fragment bit, ethertype,
 * L3 type and the VXLAN VNI. Consumed spec/misc fields are cleared.
 * Returns -EINVAL if ip_version holds an unknown value.
 */
static int
dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* VNI is shifted into place to match the bit-mask builder */
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* cvlan/svlan flags become the VLAN qualifier encoding */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* ip_version becomes the hardware l3_type encoding */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
1117 
1118 static void
1119 dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
1120 				struct mlx5dr_match_param *mask)
1121 {
1122 	dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
1123 
1124 	sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
1125 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1126 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
1127 }
1128 
1129 static int
1130 dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
1131 				     struct mlx5dr_ste_build *sb,
1132 				     u8 *tag)
1133 {
1134 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1135 
1136 	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
1137 
1138 	return 0;
1139 }
1140 
1141 static void
1142 dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
1143 				      struct mlx5dr_match_param *mask)
1144 {
1145 	dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
1146 
1147 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
1148 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1149 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
1150 }
1151 
1152 static int
1153 dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
1154 				   struct mlx5dr_ste_build *sb,
1155 				   u8 *tag)
1156 {
1157 	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
1158 
1159 	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
1160 	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
1161 	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
1162 	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
1163 	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
1164 	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
1165 	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
1166 	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
1167 	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
1168 
1169 	if (spec->tcp_flags) {
1170 		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
1171 		spec->tcp_flags = 0;
1172 	}
1173 
1174 	return 0;
1175 }
1176 
1177 static void
1178 dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
1179 				    struct mlx5dr_match_param *mask)
1180 {
1181 	dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
1182 
1183 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
1184 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1185 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
1186 }
1187 
1188 static int
1189 dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
1190 			 struct mlx5dr_ste_build *sb,
1191 			 u8 *tag)
1192 {
1193 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1194 
1195 	if (sb->inner)
1196 		DR_STE_SET_MPLS(mpls, misc2, inner, tag);
1197 	else
1198 		DR_STE_SET_MPLS(mpls, misc2, outer, tag);
1199 
1200 	return 0;
1201 }
1202 
1203 static void
1204 dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
1205 			  struct mlx5dr_match_param *mask)
1206 {
1207 	dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
1208 
1209 	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
1210 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1211 	sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
1212 }
1213 
1214 static int
1215 dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
1216 			    struct mlx5dr_ste_build *sb,
1217 			    u8 *tag)
1218 {
1219 	struct  mlx5dr_match_misc *misc = &value->misc;
1220 
1221 	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
1222 
1223 	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
1224 	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
1225 	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
1226 
1227 	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
1228 
1229 	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
1230 
1231 	return 0;
1232 }
1233 
1234 static void
1235 dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
1236 			     struct mlx5dr_match_param *mask)
1237 {
1238 	dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
1239 
1240 	sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
1241 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1242 	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
1243 }
1244 
1245 static int
1246 dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
1247 			     struct mlx5dr_ste_build *sb,
1248 			     u8 *tag)
1249 {
1250 	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
1251 
1252 	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
1253 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1254 			       misc_2, outer_first_mpls_over_gre_label);
1255 
1256 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1257 			       misc_2, outer_first_mpls_over_gre_exp);
1258 
1259 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1260 			       misc_2, outer_first_mpls_over_gre_s_bos);
1261 
1262 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1263 			       misc_2, outer_first_mpls_over_gre_ttl);
1264 	} else {
1265 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
1266 			       misc_2, outer_first_mpls_over_udp_label);
1267 
1268 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
1269 			       misc_2, outer_first_mpls_over_udp_exp);
1270 
1271 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
1272 			       misc_2, outer_first_mpls_over_udp_s_bos);
1273 
1274 		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
1275 			       misc_2, outer_first_mpls_over_udp_ttl);
1276 	}
1277 	return 0;
1278 }
1279 
1280 static void
1281 dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
1282 			      struct mlx5dr_match_param *mask)
1283 {
1284 	dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
1285 
1286 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
1287 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1288 	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
1289 }
1290 
/* Bit offsets of the ICMP type and code bytes within the first flex
 * parser dword (type in bits 31:24, code in bits 23:16).
 */
#define ICMP_TYPE_OFFSET_FIRST_DW	24
#define ICMP_CODE_OFFSET_FIRST_DW	16
1293 
1294 static int
1295 dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
1296 			 struct mlx5dr_ste_build *sb,
1297 			 u8 *tag)
1298 {
1299 	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
1300 	u32 *icmp_header_data;
1301 	int dw0_location;
1302 	int dw1_location;
1303 	u8 *icmp_type;
1304 	u8 *icmp_code;
1305 	bool is_ipv4;
1306 
1307 	is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
1308 	if (is_ipv4) {
1309 		icmp_header_data	= &misc_3->icmpv4_header_data;
1310 		icmp_type		= &misc_3->icmpv4_type;
1311 		icmp_code		= &misc_3->icmpv4_code;
1312 		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
1313 		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
1314 	} else {
1315 		icmp_header_data	= &misc_3->icmpv6_header_data;
1316 		icmp_type		= &misc_3->icmpv6_type;
1317 		icmp_code		= &misc_3->icmpv6_code;
1318 		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
1319 		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
1320 	}
1321 
1322 	switch (dw0_location) {
1323 	case 4:
1324 		MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
1325 			 (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
1326 			 (*icmp_code << ICMP_TYPE_OFFSET_FIRST_DW));
1327 
1328 		*icmp_type = 0;
1329 		*icmp_code = 0;
1330 		break;
1331 	default:
1332 		return -EINVAL;
1333 	}
1334 
1335 	switch (dw1_location) {
1336 	case 5:
1337 		MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
1338 			 *icmp_header_data);
1339 		*icmp_header_data = 0;
1340 		break;
1341 	default:
1342 		return -EINVAL;
1343 	}
1344 
1345 	return 0;
1346 }
1347 
1348 static int
1349 dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
1350 			  struct mlx5dr_match_param *mask)
1351 {
1352 	int ret;
1353 
1354 	ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
1355 	if (ret)
1356 		return ret;
1357 
1358 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
1359 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1360 	sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
1361 
1362 	return 0;
1363 }
1364 
1365 static int
1366 dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
1367 				    struct mlx5dr_ste_build *sb,
1368 				    u8 *tag)
1369 {
1370 	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
1371 
1372 	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1373 		       misc_2, metadata_reg_a);
1374 
1375 	return 0;
1376 }
1377 
1378 static void
1379 dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1380 				     struct mlx5dr_match_param *mask)
1381 {
1382 	dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
1383 
1384 	sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
1385 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1386 	sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
1387 }
1388 
1389 static int
1390 dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
1391 				struct mlx5dr_ste_build *sb,
1392 				u8 *tag)
1393 {
1394 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1395 
1396 	if (sb->inner) {
1397 		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
1398 		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
1399 	} else {
1400 		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
1401 		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static void
1408 dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
1409 				 struct mlx5dr_match_param *mask)
1410 {
1411 	dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
1412 
1413 	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
1414 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1415 	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
1416 }
1417 
1418 static int
1419 dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
1420 					      struct mlx5dr_ste_build *sb,
1421 					      u8 *tag)
1422 {
1423 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
1424 
1425 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1426 		       outer_vxlan_gpe_flags, misc3,
1427 		       outer_vxlan_gpe_flags);
1428 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1429 		       outer_vxlan_gpe_next_protocol, misc3,
1430 		       outer_vxlan_gpe_next_protocol);
1431 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
1432 		       outer_vxlan_gpe_vni, misc3,
1433 		       outer_vxlan_gpe_vni);
1434 
1435 	return 0;
1436 }
1437 
1438 static void
1439 dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
1440 					       struct mlx5dr_match_param *mask)
1441 {
1442 	dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
1443 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1444 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1445 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
1446 }
1447 
1448 static int
1449 dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
1450 					   struct mlx5dr_ste_build *sb,
1451 					   u8 *tag)
1452 {
1453 	struct mlx5dr_match_misc *misc = &value->misc;
1454 
1455 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1456 		       geneve_protocol_type, misc, geneve_protocol_type);
1457 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1458 		       geneve_oam, misc, geneve_oam);
1459 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1460 		       geneve_opt_len, misc, geneve_opt_len);
1461 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
1462 		       geneve_vni, misc, geneve_vni);
1463 
1464 	return 0;
1465 }
1466 
1467 static void
1468 dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
1469 					    struct mlx5dr_match_param *mask)
1470 {
1471 	dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
1472 	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
1473 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1474 	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
1475 }
1476 
1477 static int
1478 dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
1479 			       struct mlx5dr_ste_build *sb,
1480 			       u8 *tag)
1481 {
1482 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1483 
1484 	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
1485 	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
1486 	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
1487 	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
1488 
1489 	return 0;
1490 }
1491 
1492 static void
1493 dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
1494 				struct mlx5dr_match_param *mask)
1495 {
1496 	dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
1497 
1498 	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
1499 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1500 	sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
1501 }
1502 
1503 static int
1504 dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
1505 			       struct mlx5dr_ste_build *sb,
1506 			       u8 *tag)
1507 {
1508 	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
1509 
1510 	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
1511 	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
1512 	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
1513 	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
1514 
1515 	return 0;
1516 }
1517 
1518 static void
1519 dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
1520 				struct mlx5dr_match_param *mask)
1521 {
1522 	dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
1523 
1524 	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
1525 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1526 	sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
1527 }
1528 
1529 static void
1530 dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
1531 				      u8 *bit_mask)
1532 {
1533 	struct mlx5dr_match_misc *misc_mask = &value->misc;
1534 
1535 	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
1536 	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
1537 	misc_mask->source_eswitch_owner_vhca_id = 0;
1538 }
1539 
/* Build the source GVMI/QP tag. When the rule matches on
 * source_eswitch_owner_vhca_id, the matching domain caps (local or peer)
 * are selected; otherwise the local caps are used. The source_port is
 * translated to the vport's GVMI only if the bit mask matches on it.
 * Consumed misc fields are cleared. Returns -EINVAL if the vhca_id does
 * not belong to the local or peer domain, or the vport is invalid.
 */
static int
dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			caps = &dmn->info.caps;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			caps = &dmn->peer_dmn->info.caps;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		caps = &dmn->info.caps;
	}

	/* Only translate source_port when the mask actually matches on it */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (source_gvmi_set) {
		vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
				   misc->source_port);
			return -EINVAL;
		}

		if (vport_cap->vport_gvmi)
			MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

		misc->source_port = 0;
	}

	return 0;
}
1586 
1587 static void
1588 dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
1589 				  struct mlx5dr_match_param *mask)
1590 {
1591 	dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
1592 
1593 	sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
1594 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1595 	sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
1596 }
1597 
/* STEv0-format operations table: match builder constructors, STE field
 * accessors and action setters consumed by the SW steering core.
 */
struct mlx5dr_ste_ctx ste_ctx_v0 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v0_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v0_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v0_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v0_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v0_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v0_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v0_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v0_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v0_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v0_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v0_build_tnl_mpls_init,
	.build_icmp_init		= &dr_ste_v0_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v0_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v0_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v0_build_flex_parser_tnl_geneve_init,
	.build_register_0_init		= &dr_ste_v0_build_register_0_init,
	.build_register_1_init		= &dr_ste_v0_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v0_build_src_gvmi_qpn_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v0_init,
	.set_next_lu_type		= &dr_ste_v0_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v0_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v0_set_miss_addr,
	.get_miss_addr			= &dr_ste_v0_get_miss_addr,
	.set_hit_addr			= &dr_ste_v0_set_hit_addr,
	.set_byte_mask			= &dr_ste_v0_set_byte_mask,
	.get_byte_mask			= &dr_ste_v0_get_byte_mask,

	/* Actions */
	.set_actions_rx			= &dr_ste_v0_set_actions_rx,
	.set_actions_tx			= &dr_ste_v0_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v0_action_modify_field_arr,
	.set_action_set			= &dr_ste_v0_set_action_set,
	.set_action_add			= &dr_ste_v0_set_action_add,
	.set_action_copy		= &dr_ste_v0_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v0_set_action_decap_l3_list,
};
1641