1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
7 {
8 	return (spec->smac_47_16 || spec->smac_15_0);
9 }
10 
11 static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
12 {
13 	return (spec->dmac_47_16 || spec->dmac_15_0);
14 }
15 
16 static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
17 {
18 	return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
19 		spec->src_ip_63_32 || spec->src_ip_31_0);
20 }
21 
22 static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
23 {
24 	return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
25 		spec->dst_ip_63_32 || spec->dst_ip_31_0);
26 }
27 
28 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
29 {
30 	return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
31 		spec->ip_ecn || spec->ip_dscp);
32 }
33 
34 static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
35 {
36 	return (spec->tcp_sport || spec->tcp_dport ||
37 		spec->udp_sport || spec->udp_dport);
38 }
39 
40 static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
41 {
42 	return (spec->dst_ip_31_0 || spec->src_ip_31_0);
43 }
44 
45 static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
46 {
47 	return (dr_mask_is_l3_base_set(spec) ||
48 		dr_mask_is_tcp_udp_base_set(spec) ||
49 		dr_mask_is_ipv4_set(spec));
50 }
51 
52 static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
53 {
54 	return misc->vxlan_vni;
55 }
56 
57 static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
58 {
59 	return spec->ttl_hoplimit;
60 }
61 
/* True when any L2 destination-related field is masked: first VLAN
 * fields, DMAC, ethertype/IP version, or the second-VLAN fields in
 * misc, selected by the inner/outer prefix.
 * Note: every use of _spec is parenthesized (the first use previously
 * was not), so the macro stays safe for non-trivial lvalue arguments.
 */
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) ((_spec).first_vid || \
	(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
	(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
	(_spec).ethertype || (_spec).ip_version || \
	(_misc)._inner_outer##_second_vid || \
	(_misc)._inner_outer##_second_cfi || \
	(_misc)._inner_outer##_second_prio || \
	(_misc)._inner_outer##_second_cvlan_tag || \
	(_misc)._inner_outer##_second_svlan_tag)
71 
/* True when any L4-related field is masked: the L3 base fields,
 * TCP/UDP ports, TTL/hop-limit, or the IPv6 flow label taken from
 * misc with the given inner/outer prefix.
 */
#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
	dr_mask_is_l3_base_set(&(_spec)) || \
	dr_mask_is_tcp_udp_base_set(&(_spec)) || \
	dr_mask_is_ttl_set(&(_spec)) || \
	(_misc)._inner_outer##_ipv6_flow_label)
77 
/* True when the TCP sequence or ack number (misc3, inner/outer
 * selected by prefix) is masked.
 */
#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
	(_misc3)._inner_outer##_tcp_seq_num || \
	(_misc3)._inner_outer##_tcp_ack_num)
81 
/* True when any field of the first MPLS label (label, exp, bottom-of-
 * stack, ttl) is masked, inner/outer selected by prefix.
 */
#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
	(_misc2)._inner_outer##_first_mpls_label || \
	(_misc2)._inner_outer##_first_mpls_exp || \
	(_misc2)._inner_outer##_first_mpls_s_bos || \
	(_misc2)._inner_outer##_first_mpls_ttl)
87 
88 static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
89 {
90 	return (misc->gre_key_h || misc->gre_key_l ||
91 		misc->gre_protocol || misc->gre_c_present ||
92 		misc->gre_k_present || misc->gre_s_present);
93 }
94 
/* True when any field of the first MPLS label carried over GRE
 * (label, exp, bottom-of-stack, ttl) is masked.
 */
#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
100 
/* True when any field of the first MPLS label carried over UDP
 * (label, exp, bottom-of-stack, ttl) is masked.
 */
#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)
106 
107 static bool
108 dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
109 {
110 	return (misc3->outer_vxlan_gpe_vni ||
111 		misc3->outer_vxlan_gpe_next_protocol ||
112 		misc3->outer_vxlan_gpe_flags);
113 }
114 
115 static bool
116 dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
117 {
118 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
119 	       (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
120 }
121 
122 static bool
123 dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
124 			 struct mlx5dr_domain *dmn)
125 {
126 	return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
127 	       dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
128 }
129 
130 static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
131 {
132 	return misc->geneve_vni ||
133 	       misc->geneve_oam ||
134 	       misc->geneve_protocol_type ||
135 	       misc->geneve_opt_len;
136 }
137 
138 static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
139 {
140 	return misc3->geneve_tlv_option_0_data;
141 }
142 
143 static bool
144 dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
145 {
146 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
147 	       (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
148 }
149 
150 static bool
151 dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
152 		      struct mlx5dr_domain *dmn)
153 {
154 	return dr_mask_is_tnl_geneve_set(&mask->misc) &&
155 	       dr_matcher_supp_tnl_geneve(&dmn->info.caps);
156 }
157 
158 static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
159 {
160 	return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
161 }
162 
163 static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
164 {
165 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
166 }
167 
168 static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
169 				struct mlx5dr_domain *dmn)
170 {
171 	return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
172 	       dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
173 }
174 
175 static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
176 {
177 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
178 }
179 
180 static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
181 				     struct mlx5dr_domain *dmn)
182 {
183 	return mask->misc3.gtpu_dw_0 &&
184 	       dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
185 }
186 
187 static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
188 {
189 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
190 }
191 
192 static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
193 				     struct mlx5dr_domain *dmn)
194 {
195 	return mask->misc3.gtpu_teid &&
196 	       dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
197 }
198 
199 static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
200 {
201 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
202 }
203 
204 static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
205 				     struct mlx5dr_domain *dmn)
206 {
207 	return mask->misc3.gtpu_dw_2 &&
208 	       dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
209 }
210 
211 static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
212 {
213 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
214 }
215 
216 static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
217 					  struct mlx5dr_domain *dmn)
218 {
219 	return mask->misc3.gtpu_first_ext_dw_0 &&
220 	       dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
221 }
222 
223 static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
224 					      struct mlx5dr_domain *dmn)
225 {
226 	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
227 
228 	return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
229 		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
230 	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
231 		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
232 	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
233 		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
234 	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
235 		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
236 }
237 
238 static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
239 					      struct mlx5dr_domain *dmn)
240 {
241 	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
242 
243 	return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
244 		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
245 	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
246 		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
247 	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
248 		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
249 	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
250 		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
251 }
252 
253 static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
254 				    struct mlx5dr_domain *dmn)
255 {
256 	return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
257 	       dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
258 	       dr_mask_is_tnl_gtpu(mask, dmn);
259 }
260 
261 static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
262 {
263 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
264 	       (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
265 }
266 
267 static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
268 {
269 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
270 	       (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
271 }
272 
273 static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
274 {
275 	return (misc3->icmpv6_type || misc3->icmpv6_code ||
276 		misc3->icmpv6_header_data);
277 }
278 
279 static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
280 			    struct mlx5dr_domain *dmn)
281 {
282 	if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
283 		return dr_matcher_supp_icmp_v4(&dmn->info.caps);
284 	else if (dr_mask_is_icmpv6_set(&mask->misc3))
285 		return dr_matcher_supp_icmp_v6(&dmn->info.caps);
286 
287 	return false;
288 }
289 
290 static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
291 {
292 	return misc2->metadata_reg_a;
293 }
294 
295 static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
296 {
297 	return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
298 		misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
299 }
300 
301 static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
302 {
303 	return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
304 		misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
305 }
306 
307 static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
308 {
309 	return (misc->source_sqn || misc->source_port);
310 }
311 
312 static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
313 					      u32 flex_parser_value)
314 {
315 	if (flex_parser_id)
316 		return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
317 
318 	/* Using flex_parser 0 means that id is zero, thus value must be set. */
319 	return flex_parser_value;
320 }
321 
322 static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
323 {
324 	return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
325 						  misc4->prog_sample_field_value_0) ||
326 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
327 						  misc4->prog_sample_field_value_1) ||
328 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
329 						  misc4->prog_sample_field_value_2) ||
330 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
331 						  misc4->prog_sample_field_value_3));
332 }
333 
334 static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
335 {
336 	return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
337 	       flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
338 }
339 
340 static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
341 {
342 	return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
343 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
344 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
345 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
346 }
347 
348 static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
349 {
350 	return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
351 }
352 
353 static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
354 					 struct mlx5dr_domain *dmn)
355 {
356 	return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
357 	       dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
358 }
359 
360 static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
361 {
362 	return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
363 }
364 
365 static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
366 					 struct mlx5dr_domain *dmn)
367 {
368 	return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
369 	       dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
370 }
371 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
372 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
373 				   enum mlx5dr_ipv outer_ipv,
374 				   enum mlx5dr_ipv inner_ipv)
375 {
376 	nic_matcher->ste_builder =
377 		nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
378 	nic_matcher->num_of_builders =
379 		nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
380 
381 	if (!nic_matcher->num_of_builders) {
382 		mlx5dr_dbg(matcher->tbl->dmn,
383 			   "Rule not supported on this matcher due to IP related fields\n");
384 		return -EINVAL;
385 	}
386 
387 	return 0;
388 }
389 
/* Build the STE builder list for one (outer_ipv, inner_ipv) combination.
 * Works on a local copy of the matcher's mask so that consumed fields
 * can be tracked: every builder is expected to clear the fields it
 * handles, and any byte left non-zero at the end means the mask
 * contains something no builder supports.
 *
 * Returns 0 on success, -EINVAL if no builder could be generated (or
 * pre-check failed), -EOPNOTSUPP if part of the mask was not consumed.
 */
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_matcher_rx_tx *nic_matcher,
				       enum mlx5dr_ipv outer_ipv,
				       enum mlx5dr_ipv inner_ipv)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_match_param mask = {};
	bool allow_empty_match = false;
	struct mlx5dr_ste_build *sb;
	bool inner, rx;
	int idx = 0;
	int ret, i;

	sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
	rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;

	/* Create a temporary mask to track and clear used mask fields */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
		mask.outer = matcher->mask.outer;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
		mask.misc = matcher->mask.misc;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
		mask.inner = matcher->mask.inner;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
		mask.misc2 = matcher->mask.misc2;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
		mask.misc3 = matcher->mask.misc3;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
		mask.misc4 = matcher->mask.misc4;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
		mask.misc5 = matcher->mask.misc5;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, NULL);
	if (ret)
		return ret;

	/* Optimize RX pipe by reducing source port match, since
	 * the FDB RX part is connected only to the wire.
	 */
	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
	    rx && mask.misc.source_port) {
		mask.misc.source_port = 0;
		mask.misc.source_eswitch_owner_vhca_id = 0;
		allow_empty_match = true;
	}

	/* Outer */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3)) {
		inner = false;

		if (dr_mask_is_wqe_metadata_set(&mask.misc2))
			mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
							 &mask, inner, rx);

		if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
			mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
			mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		/* Source gvmi/qpn matching is only relevant for FDB and NIC RX */
		if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
		    (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
		     dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
			mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
						      &mask, dmn, inner, rx);
		}

		/* A combined SMAC+DMAC builder is added first when both are
		 * masked; the following SMAC-only builder covers any leftover.
		 */
		if (dr_mask_is_smac_set(&mask.outer) &&
		    dr_mask_is_dmac_set(&mask.outer)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.outer))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (outer_ipv == DR_RULE_IPV6) {
			if (dr_mask_is_dst_addr_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (dr_mask_is_src_addr_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		/* Tunnel types are mutually exclusive in a single rule,
		 * hence the if/else-if chain.
		 */
		if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
			mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
						       &mask, inner, rx);
		else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
			mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
						    &mask, inner, rx);
			if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
				mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
								    &mask, &dmn->info.caps,
								    inner, rx);
		} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
			if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
							  &mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);

		if (dr_mask_is_icmp(&mask, dmn))
			mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
					      &mask, &dmn->info.caps,
					      inner, rx);

		if (dr_mask_is_tnl_gre_set(&mask.misc))
			mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
						 &mask, inner, rx);
	}

	/* Inner */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3)) {
		inner = true;

		if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
			mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_smac_set(&mask.inner) &&
		    dr_mask_is_dmac_set(&mask.inner)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.inner))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (inner_ipv == DR_RULE_IPV6) {
			if (dr_mask_is_dst_addr_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (dr_mask_is_src_addr_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
	}

	/* Misc 4 flex parser builders are built with inner == false */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
						       &mask, false, rx);

		if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
						       &mask, false, rx);
	}

	/* Empty matcher, takes all */
	if ((!idx && allow_empty_match) ||
	    matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
		mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);

	if (idx == 0) {
		mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
		return -EINVAL;
	}

	/* Check that all mask fields were consumed */
	for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
		if (((u8 *)&mask)[i] != 0) {
			mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
			return -EOPNOTSUPP;
		}
	}

	nic_matcher->ste_builder = sb;
	nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;

	return 0;
}
658 
/* Splice a matcher's per-direction (RX or TX) hash tables into the
 * table's chain: the matcher's end anchor is pointed at the next
 * matcher (or the table's default miss address), its start table
 * misses into its own end anchor, and the previous table (previous
 * matcher's anchor, or the table's start anchor) is pointed at the
 * matcher's start table. Each link is written to ICM via
 * mlx5dr_ste_htbl_init_and_postsend() in this specific order.
 */
static int dr_matcher_connect(struct mlx5dr_domain *dmn,
			      struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
			      struct mlx5dr_matcher_rx_tx *next_nic_matcher,
			      struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
	struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *prev_htbl;
	int ret;

	/* Connect end anchor hash table to next_htbl or to the default address */
	if (next_nic_matcher) {
		info.type = CONNECT_HIT;
		info.hit_next_htbl = next_nic_matcher->s_htbl;
	} else {
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_tbl->default_icm_addr;
	}
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->e_anchor,
						&info, info.type == CONNECT_HIT);
	if (ret)
		return ret;

	/* Connect start hash table to end anchor */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->s_htbl,
						&info, false);
	if (ret)
		return ret;

	/* Connect previous hash table to matcher start hash table */
	if (prev_nic_matcher)
		prev_htbl = prev_nic_matcher->e_anchor;
	else
		prev_htbl = nic_tbl->s_anchor;

	info.type = CONNECT_HIT;
	info.hit_next_htbl = curr_nic_matcher->s_htbl;
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
						&info, true);
	if (ret)
		return ret;

	/* Update the pointing ste and next hash table */
	curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
	prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;

	if (next_nic_matcher) {
		next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
		curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
	}

	return 0;
}
717 
/* Insert the matcher into its table's priority-sorted matcher list and
 * wire up the RX and/or TX steering chains accordingly.
 * Returns 0 on success or the error from dr_matcher_connect().
 */
static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
{
	struct mlx5dr_matcher *next_matcher, *prev_matcher, *tmp_matcher;
	struct mlx5dr_table *tbl = matcher->tbl;
	struct mlx5dr_domain *dmn = tbl->dmn;
	bool first = true;
	int ret;

	/* Find the first existing matcher with priority >= ours; the new
	 * matcher is inserted right before it. 'first' tracks whether any
	 * matcher precedes the insertion point.
	 */
	next_matcher = NULL;
	list_for_each_entry(tmp_matcher, &tbl->matcher_list, list_node) {
		if (tmp_matcher->prio >= matcher->prio) {
			next_matcher = tmp_matcher;
			break;
		}
		first = false;
	}

	/* The predecessor is either the node before next_matcher, or the
	 * list tail when we append at the end of a non-empty list.
	 */
	prev_matcher = NULL;
	if (next_matcher && !first)
		prev_matcher = list_prev_entry(next_matcher, list_node);
	else if (!first)
		prev_matcher = list_last_entry(&tbl->matcher_list,
					       struct mlx5dr_matcher,
					       list_node);

	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
	    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
		ret = dr_matcher_connect(dmn, &matcher->rx,
					 next_matcher ? &next_matcher->rx : NULL,
					 prev_matcher ?	&prev_matcher->rx : NULL);
		if (ret)
			return ret;
	}

	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
	    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
		ret = dr_matcher_connect(dmn, &matcher->tx,
					 next_matcher ? &next_matcher->tx : NULL,
					 prev_matcher ?	&prev_matcher->tx : NULL);
		if (ret)
			return ret;
	}

	/* Link into the list only after the HW chains were connected */
	if (prev_matcher)
		list_add(&matcher->list_node, &prev_matcher->list_node);
	else if (next_matcher)
		list_add_tail(&matcher->list_node,
			      &next_matcher->list_node);
	else
		list_add(&matcher->list_node, &tbl->matcher_list);

	return 0;
}
771 
/* Drop the references taken on the start hash table and end anchor
 * in dr_matcher_init_nic().
 */
static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	mlx5dr_htbl_put(nic_matcher->s_htbl);
	mlx5dr_htbl_put(nic_matcher->e_anchor);
}
777 
/* FDB matchers have both RX and TX sides - release both. */
static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
{
	dr_matcher_uninit_nic(&matcher->rx);
	dr_matcher_uninit_nic(&matcher->tx);
}
783 
784 static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
785 {
786 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
787 
788 	switch (dmn->type) {
789 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
790 		dr_matcher_uninit_nic(&matcher->rx);
791 		break;
792 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
793 		dr_matcher_uninit_nic(&matcher->tx);
794 		break;
795 	case MLX5DR_DOMAIN_TYPE_FDB:
796 		dr_matcher_uninit_fdb(matcher);
797 		break;
798 	default:
799 		WARN_ON(true);
800 		break;
801 	}
802 }
803 
804 static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
805 					   struct mlx5dr_matcher_rx_tx *nic_matcher)
806 {
807 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
808 
809 	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
810 	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
811 	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
812 	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
813 
814 	if (!nic_matcher->ste_builder) {
815 		mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
816 		return -EINVAL;
817 	}
818 
819 	return 0;
820 }
821 
/* Initialize one direction (RX or TX) of a matcher: compute its STE
 * builders and allocate its start hash table and end anchor.
 * Returns 0 on success, -ENOMEM/-EINVAL on failure.
 */
static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	int ret;

	ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
	if (ret)
		return ret;

	/* End anchor: a single don't-care entry terminating the chain */
	nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						      DR_CHUNK_SIZE_1,
						      MLX5DR_STE_LU_TYPE_DONT_CARE,
						      0);
	if (!nic_matcher->e_anchor)
		return -ENOMEM;

	/* Start table uses the lookup type/byte mask of the first builder */
	nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						    DR_CHUNK_SIZE_1,
						    nic_matcher->ste_builder[0].lu_type,
						    nic_matcher->ste_builder[0].byte_mask);
	if (!nic_matcher->s_htbl) {
		ret = -ENOMEM;
		goto free_e_htbl;
	}

	/* make sure the tables exist while empty */
	mlx5dr_htbl_get(nic_matcher->s_htbl);
	mlx5dr_htbl_get(nic_matcher->e_anchor);

	return 0;

free_e_htbl:
	mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
	return ret;
}
858 
859 static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
860 {
861 	int ret;
862 
863 	ret = dr_matcher_init_nic(matcher, &matcher->rx);
864 	if (ret)
865 		return ret;
866 
867 	ret = dr_matcher_init_nic(matcher, &matcher->tx);
868 	if (ret)
869 		goto uninit_nic_rx;
870 
871 	return 0;
872 
873 uninit_nic_rx:
874 	dr_matcher_uninit_nic(&matcher->rx);
875 	return ret;
876 }
877 
878 static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
879 				 struct mlx5dr_match_parameters *mask)
880 {
881 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
882 	struct mlx5dr_match_parameters consumed_mask;
883 	int i, ret = 0;
884 
885 	if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
886 		mlx5dr_err(dmn, "Invalid match criteria attribute\n");
887 		return -EINVAL;
888 	}
889 
890 	if (mask) {
891 		if (mask->match_sz > DR_SZ_MATCH_PARAM) {
892 			mlx5dr_err(dmn, "Invalid match size attribute\n");
893 			return -EINVAL;
894 		}
895 
896 		consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
897 		if (!consumed_mask.match_buf)
898 			return -ENOMEM;
899 
900 		consumed_mask.match_sz = mask->match_sz;
901 		memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
902 		mlx5dr_ste_copy_param(matcher->match_criteria,
903 				      &matcher->mask, &consumed_mask, true);
904 
905 		/* Check that all mask data was consumed */
906 		for (i = 0; i < consumed_mask.match_sz; i++) {
907 			if (!((u8 *)consumed_mask.match_buf)[i])
908 				continue;
909 
910 			mlx5dr_dbg(dmn,
911 				   "Match param mask contains unsupported parameters\n");
912 			ret = -EOPNOTSUPP;
913 			break;
914 		}
915 
916 		kfree(consumed_mask.match_buf);
917 	}
918 
919 	return ret;
920 }
921 
922 static int dr_matcher_init(struct mlx5dr_matcher *matcher,
923 			   struct mlx5dr_match_parameters *mask)
924 {
925 	struct mlx5dr_table *tbl = matcher->tbl;
926 	struct mlx5dr_domain *dmn = tbl->dmn;
927 	int ret;
928 
929 	ret = dr_matcher_copy_param(matcher, mask);
930 	if (ret)
931 		return ret;
932 
933 	switch (dmn->type) {
934 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
935 		matcher->rx.nic_tbl = &tbl->rx;
936 		ret = dr_matcher_init_nic(matcher, &matcher->rx);
937 		break;
938 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
939 		matcher->tx.nic_tbl = &tbl->tx;
940 		ret = dr_matcher_init_nic(matcher, &matcher->tx);
941 		break;
942 	case MLX5DR_DOMAIN_TYPE_FDB:
943 		matcher->rx.nic_tbl = &tbl->rx;
944 		matcher->tx.nic_tbl = &tbl->tx;
945 		ret = dr_matcher_init_fdb(matcher);
946 		break;
947 	default:
948 		WARN_ON(true);
949 		ret = -EINVAL;
950 	}
951 
952 	return ret;
953 }
954 
955 struct mlx5dr_matcher *
956 mlx5dr_matcher_create(struct mlx5dr_table *tbl,
957 		      u32 priority,
958 		      u8 match_criteria_enable,
959 		      struct mlx5dr_match_parameters *mask)
960 {
961 	struct mlx5dr_matcher *matcher;
962 	int ret;
963 
964 	refcount_inc(&tbl->refcount);
965 
966 	matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
967 	if (!matcher)
968 		goto dec_ref;
969 
970 	matcher->tbl = tbl;
971 	matcher->prio = priority;
972 	matcher->match_criteria = match_criteria_enable;
973 	refcount_set(&matcher->refcount, 1);
974 	INIT_LIST_HEAD(&matcher->list_node);
975 	INIT_LIST_HEAD(&matcher->dbg_rule_list);
976 
977 	mlx5dr_domain_lock(tbl->dmn);
978 
979 	ret = dr_matcher_init(matcher, mask);
980 	if (ret)
981 		goto free_matcher;
982 
983 	ret = dr_matcher_add_to_tbl(matcher);
984 	if (ret)
985 		goto matcher_uninit;
986 
987 	mlx5dr_domain_unlock(tbl->dmn);
988 
989 	return matcher;
990 
991 matcher_uninit:
992 	dr_matcher_uninit(matcher);
993 free_matcher:
994 	mlx5dr_domain_unlock(tbl->dmn);
995 	kfree(matcher);
996 dec_ref:
997 	refcount_dec(&tbl->refcount);
998 	return NULL;
999 }
1000 
1001 static int dr_matcher_disconnect(struct mlx5dr_domain *dmn,
1002 				 struct mlx5dr_table_rx_tx *nic_tbl,
1003 				 struct mlx5dr_matcher_rx_tx *next_nic_matcher,
1004 				 struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
1005 {
1006 	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
1007 	struct mlx5dr_htbl_connect_info info;
1008 	struct mlx5dr_ste_htbl *prev_anchor;
1009 
1010 	if (prev_nic_matcher)
1011 		prev_anchor = prev_nic_matcher->e_anchor;
1012 	else
1013 		prev_anchor = nic_tbl->s_anchor;
1014 
1015 	/* Connect previous anchor hash table to next matcher or to the default address */
1016 	if (next_nic_matcher) {
1017 		info.type = CONNECT_HIT;
1018 		info.hit_next_htbl = next_nic_matcher->s_htbl;
1019 		next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
1020 		prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
1021 	} else {
1022 		info.type = CONNECT_MISS;
1023 		info.miss_icm_addr = nic_tbl->default_icm_addr;
1024 		prev_anchor->ste_arr[0].next_htbl = NULL;
1025 	}
1026 
1027 	return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
1028 						 &info, true);
1029 }
1030 
1031 static int dr_matcher_remove_from_tbl(struct mlx5dr_matcher *matcher)
1032 {
1033 	struct mlx5dr_matcher *prev_matcher, *next_matcher;
1034 	struct mlx5dr_table *tbl = matcher->tbl;
1035 	struct mlx5dr_domain *dmn = tbl->dmn;
1036 	int ret = 0;
1037 
1038 	if (list_is_last(&matcher->list_node, &tbl->matcher_list))
1039 		next_matcher = NULL;
1040 	else
1041 		next_matcher = list_next_entry(matcher, list_node);
1042 
1043 	if (matcher->list_node.prev == &tbl->matcher_list)
1044 		prev_matcher = NULL;
1045 	else
1046 		prev_matcher = list_prev_entry(matcher, list_node);
1047 
1048 	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
1049 	    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
1050 		ret = dr_matcher_disconnect(dmn, &tbl->rx,
1051 					    next_matcher ? &next_matcher->rx : NULL,
1052 					    prev_matcher ? &prev_matcher->rx : NULL);
1053 		if (ret)
1054 			return ret;
1055 	}
1056 
1057 	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
1058 	    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
1059 		ret = dr_matcher_disconnect(dmn, &tbl->tx,
1060 					    next_matcher ? &next_matcher->tx : NULL,
1061 					    prev_matcher ? &prev_matcher->tx : NULL);
1062 		if (ret)
1063 			return ret;
1064 	}
1065 
1066 	list_del(&matcher->list_node);
1067 
1068 	return 0;
1069 }
1070 
1071 int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
1072 {
1073 	struct mlx5dr_table *tbl = matcher->tbl;
1074 
1075 	if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
1076 		return -EBUSY;
1077 
1078 	mlx5dr_domain_lock(tbl->dmn);
1079 
1080 	dr_matcher_remove_from_tbl(matcher);
1081 	dr_matcher_uninit(matcher);
1082 	refcount_dec(&matcher->tbl->refcount);
1083 
1084 	mlx5dr_domain_unlock(tbl->dmn);
1085 	kfree(matcher);
1086 
1087 	return 0;
1088 }
1089