1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include "dr_types.h"
5 
6 static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
7 {
8 	return (spec->smac_47_16 || spec->smac_15_0);
9 }
10 
11 static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
12 {
13 	return (spec->dmac_47_16 || spec->dmac_15_0);
14 }
15 
16 static bool dr_mask_is_src_addr_set(struct mlx5dr_match_spec *spec)
17 {
18 	return (spec->src_ip_127_96 || spec->src_ip_95_64 ||
19 		spec->src_ip_63_32 || spec->src_ip_31_0);
20 }
21 
22 static bool dr_mask_is_dst_addr_set(struct mlx5dr_match_spec *spec)
23 {
24 	return (spec->dst_ip_127_96 || spec->dst_ip_95_64 ||
25 		spec->dst_ip_63_32 || spec->dst_ip_31_0);
26 }
27 
28 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
29 {
30 	return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
31 		spec->ip_ecn || spec->ip_dscp);
32 }
33 
34 static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
35 {
36 	return (spec->tcp_sport || spec->tcp_dport ||
37 		spec->udp_sport || spec->udp_dport);
38 }
39 
40 static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
41 {
42 	return (spec->dst_ip_31_0 || spec->src_ip_31_0);
43 }
44 
45 static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
46 {
47 	return (dr_mask_is_l3_base_set(spec) ||
48 		dr_mask_is_tcp_udp_base_set(spec) ||
49 		dr_mask_is_ipv4_set(spec));
50 }
51 
52 static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
53 {
54 	return misc->vxlan_vni;
55 }
56 
57 static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
58 {
59 	return spec->ttl_hoplimit;
60 }
61 
/* True when any L2-destination related field is masked: first VLAN
 * fields, DMAC, ethertype/IP version, or the per-side (inner/outer)
 * second-VLAN fields from the misc parameters.
 * Fix: parenthesize the first use of _spec; it was the only unguarded
 * macro-argument expansion in this macro.
 */
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) ((_spec).first_vid || \
	(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
	(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
	(_spec).ethertype || (_spec).ip_version || \
	(_misc)._inner_outer##_second_vid || \
	(_misc)._inner_outer##_second_cfi || \
	(_misc)._inner_outer##_second_prio || \
	(_misc)._inner_outer##_second_cvlan_tag || \
	(_misc)._inner_outer##_second_svlan_tag)
71 
/* True when any L4-related field is masked for the given side:
 * base L3 fields, TCP/UDP ports, TTL/hop-limit, or the per-side
 * IPv6 flow label from the misc parameters.
 */
#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
	dr_mask_is_l3_base_set(&(_spec)) || \
	dr_mask_is_tcp_udp_base_set(&(_spec)) || \
	dr_mask_is_ttl_set(&(_spec)) || \
	(_misc)._inner_outer##_ipv6_flow_label)
77 
/* True when the per-side TCP sequence/ack numbers are masked (misc3). */
#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
	(_misc3)._inner_outer##_tcp_seq_num || \
	(_misc3)._inner_outer##_tcp_ack_num)
81 
/* True when any field of the per-side first MPLS label is masked (misc2). */
#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
	(_misc2)._inner_outer##_first_mpls_label || \
	(_misc2)._inner_outer##_first_mpls_exp || \
	(_misc2)._inner_outer##_first_mpls_s_bos || \
	(_misc2)._inner_outer##_first_mpls_ttl)
87 
88 static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
89 {
90 	return (misc->gre_key_h || misc->gre_key_l ||
91 		misc->gre_protocol || misc->gre_c_present ||
92 		misc->gre_k_present || misc->gre_s_present);
93 }
94 
/* True when any field of the outer first MPLS-over-GRE label is masked. */
#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
100 
/* True when any field of the outer first MPLS-over-UDP label is masked. */
#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)
106 
107 static bool
108 dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
109 {
110 	return (misc3->outer_vxlan_gpe_vni ||
111 		misc3->outer_vxlan_gpe_next_protocol ||
112 		misc3->outer_vxlan_gpe_flags);
113 }
114 
115 static bool
116 dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
117 {
118 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
119 	       (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
120 }
121 
122 static bool
123 dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
124 			 struct mlx5dr_domain *dmn)
125 {
126 	return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
127 	       dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
128 }
129 
130 static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
131 {
132 	return misc->geneve_vni ||
133 	       misc->geneve_oam ||
134 	       misc->geneve_protocol_type ||
135 	       misc->geneve_opt_len;
136 }
137 
138 static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
139 {
140 	return misc3->geneve_tlv_option_0_data;
141 }
142 
143 static bool
144 dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
145 {
146 	return caps->flex_parser_ok_bits_supp;
147 }
148 
149 static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
150 						    struct mlx5dr_domain *dmn)
151 {
152 	return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
153 	       misc->geneve_tlv_option_0_exist;
154 }
155 
156 static bool
157 dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
158 {
159 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
160 	       (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
161 }
162 
163 static bool
164 dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
165 		      struct mlx5dr_domain *dmn)
166 {
167 	return dr_mask_is_tnl_geneve_set(&mask->misc) &&
168 	       dr_matcher_supp_tnl_geneve(&dmn->info.caps);
169 }
170 
171 static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
172 {
173 	return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
174 }
175 
176 static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
177 {
178 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
179 }
180 
181 static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
182 				struct mlx5dr_domain *dmn)
183 {
184 	return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
185 	       dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
186 }
187 
188 static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
189 {
190 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
191 }
192 
193 static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
194 				     struct mlx5dr_domain *dmn)
195 {
196 	return mask->misc3.gtpu_dw_0 &&
197 	       dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
198 }
199 
200 static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
201 {
202 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
203 }
204 
205 static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
206 				     struct mlx5dr_domain *dmn)
207 {
208 	return mask->misc3.gtpu_teid &&
209 	       dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
210 }
211 
212 static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
213 {
214 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
215 }
216 
217 static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
218 				     struct mlx5dr_domain *dmn)
219 {
220 	return mask->misc3.gtpu_dw_2 &&
221 	       dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
222 }
223 
224 static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
225 {
226 	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
227 }
228 
229 static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
230 					  struct mlx5dr_domain *dmn)
231 {
232 	return mask->misc3.gtpu_first_ext_dw_0 &&
233 	       dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
234 }
235 
/* True when at least one masked GTP-U field is mapped (by its configured
 * flex parser id) onto flex parser 0.  Each term pairs the id-range check
 * with the corresponding mask-and-capability check.
 */
static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
					      struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
250 
/* True when at least one masked GTP-U field is mapped (by its configured
 * flex parser id) onto flex parser 1.  Mirror of the parser-0 variant.
 */
static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
					      struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
265 
266 static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
267 				    struct mlx5dr_domain *dmn)
268 {
269 	return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
270 	       dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
271 	       dr_mask_is_tnl_gtpu(mask, dmn);
272 }
273 
274 static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
275 {
276 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
277 	       (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
278 }
279 
280 static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
281 {
282 	return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
283 	       (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
284 }
285 
286 static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
287 {
288 	return (misc3->icmpv6_type || misc3->icmpv6_code ||
289 		misc3->icmpv6_header_data);
290 }
291 
292 static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
293 			    struct mlx5dr_domain *dmn)
294 {
295 	if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
296 		return dr_matcher_supp_icmp_v4(&dmn->info.caps);
297 	else if (dr_mask_is_icmpv6_set(&mask->misc3))
298 		return dr_matcher_supp_icmp_v6(&dmn->info.caps);
299 
300 	return false;
301 }
302 
303 static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
304 {
305 	return misc2->metadata_reg_a;
306 }
307 
308 static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
309 {
310 	return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
311 		misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
312 }
313 
314 static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
315 {
316 	return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
317 		misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
318 }
319 
320 static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
321 {
322 	return (misc->source_sqn || misc->source_port);
323 }
324 
325 static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
326 					      u32 flex_parser_value)
327 {
328 	if (flex_parser_id)
329 		return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
330 
331 	/* Using flex_parser 0 means that id is zero, thus value must be set. */
332 	return flex_parser_value;
333 }
334 
335 static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
336 {
337 	return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
338 						  misc4->prog_sample_field_value_0) ||
339 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
340 						  misc4->prog_sample_field_value_1) ||
341 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
342 						  misc4->prog_sample_field_value_2) ||
343 		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
344 						  misc4->prog_sample_field_value_3));
345 }
346 
347 static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
348 {
349 	return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
350 	       flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
351 }
352 
353 static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
354 {
355 	return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
356 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
357 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
358 		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
359 }
360 
361 static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
362 {
363 	return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
364 }
365 
366 static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
367 					 struct mlx5dr_domain *dmn)
368 {
369 	return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
370 	       dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
371 }
372 
373 static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
374 {
375 	return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
376 }
377 
378 static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
379 					 struct mlx5dr_domain *dmn)
380 {
381 	return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
382 	       dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
383 }
384 
385 static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
386 {
387 	return misc5->tunnel_header_0 || misc5->tunnel_header_1;
388 }
389 
390 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
391 				   struct mlx5dr_matcher_rx_tx *nic_matcher,
392 				   enum mlx5dr_ipv outer_ipv,
393 				   enum mlx5dr_ipv inner_ipv)
394 {
395 	nic_matcher->ste_builder =
396 		nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
397 	nic_matcher->num_of_builders =
398 		nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
399 
400 	if (!nic_matcher->num_of_builders) {
401 		mlx5dr_dbg(matcher->tbl->dmn,
402 			   "Rule not supported on this matcher due to IP related fields\n");
403 		return -EINVAL;
404 	}
405 
406 	return 0;
407 }
408 
/* Translate the matcher mask into an ordered array of STE builders for
 * one outer/inner IP version combination.  Each recognized group of mask
 * fields appends one builder to sb[]; the builders clear the mask fields
 * they consume, so leftover non-zero bytes at the end indicate fields
 * that no builder supports.
 *
 * Returns 0 on success, -EINVAL if no builder could be generated, or
 * -EOPNOTSUPP if the mask contains unconsumed (unsupported) fields.
 */
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_matcher_rx_tx *nic_matcher,
				       enum mlx5dr_ipv outer_ipv,
				       enum mlx5dr_ipv inner_ipv)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_match_param mask = {};
	bool allow_empty_match = false;
	struct mlx5dr_ste_build *sb;
	bool inner, rx;
	int idx = 0;
	int ret, i;

	sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
	rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;

	/* Create a temporary mask to track and clear used mask fields */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
		mask.outer = matcher->mask.outer;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
		mask.misc = matcher->mask.misc;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
		mask.inner = matcher->mask.inner;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
		mask.misc2 = matcher->mask.misc2;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
		mask.misc3 = matcher->mask.misc3;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
		mask.misc4 = matcher->mask.misc4;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
		mask.misc5 = matcher->mask.misc5;

	/* Validate the original (uncleared) mask before building. */
	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, NULL);
	if (ret)
		return ret;

	/* Optimize RX pipe by reducing source port match, since
	 * the FDB RX part is connected only to the wire.
	 */
	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
	    rx && mask.misc.source_port) {
		mask.misc.source_port = 0;
		mask.misc.source_eswitch_owner_vhca_id = 0;
		allow_empty_match = true;
	}

	/* Outer */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3 |
				       DR_MATCHER_CRITERIA_MISC5)) {
		inner = false;

		if (dr_mask_is_wqe_metadata_set(&mask.misc2))
			mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
							 &mask, inner, rx);

		if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
			mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
			mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
		    (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
		     dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
			mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
						      &mask, dmn, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.outer) &&
		    dr_mask_is_dmac_set(&mask.outer)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.outer))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		/* L3/L4 builders are chosen by the outer IP version. */
		if (outer_ipv == DR_RULE_IPV6) {
			if (dr_mask_is_dst_addr_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (dr_mask_is_src_addr_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		/* Tunnel protocols are mutually exclusive: at most one of
		 * the following else-if branches adds tunnel builders.
		 */
		if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
			mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
						       &mask, inner, rx);
		else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
			mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
						    &mask, inner, rx);
			if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
				mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
								    &mask, &dmn->info.caps,
								    inner, rx);
			if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
				mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
									  &mask, &dmn->info.caps,
									  inner, rx);
		} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
			if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
							  &mask, inner, rx);
		} else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
			mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);

		if (dr_mask_is_icmp(&mask, dmn))
			mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
					      &mask, &dmn->info.caps,
					      inner, rx);

		if (dr_mask_is_tnl_gre_set(&mask.misc))
			mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
						 &mask, inner, rx);
	}

	/* Inner */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3)) {
		inner = true;

		if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
			mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_smac_set(&mask.inner) &&
		    dr_mask_is_dmac_set(&mask.inner)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.inner))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		/* L3/L4 builders are chosen by the inner IP version. */
		if (inner_ipv == DR_RULE_IPV6) {
			if (dr_mask_is_dst_addr_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (dr_mask_is_src_addr_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
	}

	/* Flex parser samples (misc4) are neither inner nor outer. */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
						       &mask, false, rx);

		if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
						       &mask, false, rx);
	}

	/* Empty matcher, takes all */
	if ((!idx && allow_empty_match) ||
	    matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
		mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);

	if (idx == 0) {
		mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
		return -EINVAL;
	}

	/* Check that all mask fields were consumed */
	for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
		if (((u8 *)&mask)[i] != 0) {
			mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
			return -EOPNOTSUPP;
		}
	}

	nic_matcher->ste_builder = sb;
	nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;

	return 0;
}
685 
/* Splice a nic matcher into the table's chain of hash tables:
 *   prev matcher's end anchor (or the table start anchor)
 *     -> curr start htbl -> curr end anchor
 *     -> next matcher's start htbl (or the table default miss address).
 * The three init_and_postsend calls are ordered back-to-front so the
 * chain is never left pointing at an unconnected table.
 */
static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
				  struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
				  struct mlx5dr_matcher_rx_tx *next_nic_matcher,
				  struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
	struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *prev_htbl;
	int ret;

	/* Connect end anchor hash table to next_htbl or to the default address */
	if (next_nic_matcher) {
		info.type = CONNECT_HIT;
		info.hit_next_htbl = next_nic_matcher->s_htbl;
	} else {
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_tbl->default_icm_addr;
	}
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->e_anchor,
						&info, info.type == CONNECT_HIT);
	if (ret)
		return ret;

	/* Connect start hash table to end anchor */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = curr_nic_matcher->e_anchor->chunk->icm_addr;
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->s_htbl,
						&info, false);
	if (ret)
		return ret;

	/* Connect previous hash table to matcher start hash table */
	if (prev_nic_matcher)
		prev_htbl = prev_nic_matcher->e_anchor;
	else
		prev_htbl = nic_tbl->s_anchor;

	info.type = CONNECT_HIT;
	info.hit_next_htbl = curr_nic_matcher->s_htbl;
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
						&info, true);
	if (ret)
		return ret;

	/* Update the pointing ste and next hash table */
	curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->ste_arr;
	prev_htbl->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;

	if (next_nic_matcher) {
		next_nic_matcher->s_htbl->pointing_ste = curr_nic_matcher->e_anchor->ste_arr;
		curr_nic_matcher->e_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
	}

	return 0;
}
744 
/* Insert a nic matcher into its table's priority-ordered matcher list
 * and wire it into the hash-table chain.  The list is kept sorted in
 * ascending priority; a matcher is inserted before the first existing
 * matcher whose priority is >= its own.
 * Returns 0 on success (including the already-listed no-op case).
 */
int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
				  struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
	struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
	bool first = true;
	int ret;

	/* If the nic matcher is already on its parent nic table list,
	 * then it is already connected to the chain of nic matchers.
	 */
	if (!list_empty(&nic_matcher->list_node))
		return 0;

	/* Find the first matcher of equal or higher priority (insertion point). */
	next_nic_matcher = NULL;
	list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
		if (tmp_nic_matcher->prio >= nic_matcher->prio) {
			next_nic_matcher = tmp_nic_matcher;
			break;
		}
		first = false;
	}

	/* prev is the list entry before the insertion point; NULL when
	 * inserting at the head (first == true).
	 */
	prev_nic_matcher = NULL;
	if (next_nic_matcher && !first)
		prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
	else if (!first)
		prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
						   struct mlx5dr_matcher_rx_tx,
						   list_node);

	/* Wire the hash tables before exposing the matcher on the list. */
	ret = dr_nic_matcher_connect(dmn, nic_matcher,
				     next_nic_matcher, prev_nic_matcher);
	if (ret)
		return ret;

	if (prev_nic_matcher)
		list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
	else if (next_nic_matcher)
		list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
	else
		list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);

	return ret;
}
790 
791 static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
792 {
793 	mlx5dr_htbl_put(nic_matcher->s_htbl);
794 	mlx5dr_htbl_put(nic_matcher->e_anchor);
795 }
796 
/* FDB matchers carry both an RX and a TX nic matcher; release both. */
static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
{
	dr_matcher_uninit_nic(&matcher->rx);
	dr_matcher_uninit_nic(&matcher->tx);
}
802 
803 static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
804 {
805 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
806 
807 	switch (dmn->type) {
808 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
809 		dr_matcher_uninit_nic(&matcher->rx);
810 		break;
811 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
812 		dr_matcher_uninit_nic(&matcher->tx);
813 		break;
814 	case MLX5DR_DOMAIN_TYPE_FDB:
815 		dr_matcher_uninit_fdb(matcher);
816 		break;
817 	default:
818 		WARN_ON(true);
819 		break;
820 	}
821 }
822 
/* Precompute builder arrays for all four outer/inner IP version pairs.
 * Individual failures are deliberately ignored: a mask may be valid for
 * some version pairs and not others.  Only if no combination produced a
 * builder chain (ste_builder still NULL) is this an error.
 */
static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;

	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);

	if (!nic_matcher->ste_builder) {
		mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
		return -EINVAL;
	}

	return 0;
}
840 
/* Initialize one direction (RX or TX) of a matcher: compute the STE
 * builders and allocate the start hash table and end anchor, each held
 * with an extra reference so they persist while empty.
 * Returns 0 on success or a negative errno.
 */
static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	int ret;

	nic_matcher->prio = matcher->prio;
	INIT_LIST_HEAD(&nic_matcher->list_node);

	ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
	if (ret)
		return ret;

	/* End anchor: a single don't-care entry terminating the chain. */
	nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						      DR_CHUNK_SIZE_1,
						      MLX5DR_STE_LU_TYPE_DONT_CARE,
						      0);
	if (!nic_matcher->e_anchor)
		return -ENOMEM;

	/* Start table uses the first builder's lookup type and byte mask. */
	nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						    DR_CHUNK_SIZE_1,
						    nic_matcher->ste_builder[0].lu_type,
						    nic_matcher->ste_builder[0].byte_mask);
	if (!nic_matcher->s_htbl) {
		ret = -ENOMEM;
		goto free_e_htbl;
	}

	/* make sure the tables exist while empty */
	mlx5dr_htbl_get(nic_matcher->s_htbl);
	mlx5dr_htbl_get(nic_matcher->e_anchor);

	return 0;

free_e_htbl:
	mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
	return ret;
}
880 
881 static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
882 {
883 	int ret;
884 
885 	ret = dr_matcher_init_nic(matcher, &matcher->rx);
886 	if (ret)
887 		return ret;
888 
889 	ret = dr_matcher_init_nic(matcher, &matcher->tx);
890 	if (ret)
891 		goto uninit_nic_rx;
892 
893 	return 0;
894 
895 uninit_nic_rx:
896 	dr_matcher_uninit_nic(&matcher->rx);
897 	return ret;
898 }
899 
900 static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
901 				 struct mlx5dr_match_parameters *mask)
902 {
903 	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
904 	struct mlx5dr_match_parameters consumed_mask;
905 	int i, ret = 0;
906 
907 	if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
908 		mlx5dr_err(dmn, "Invalid match criteria attribute\n");
909 		return -EINVAL;
910 	}
911 
912 	if (mask) {
913 		if (mask->match_sz > DR_SZ_MATCH_PARAM) {
914 			mlx5dr_err(dmn, "Invalid match size attribute\n");
915 			return -EINVAL;
916 		}
917 
918 		consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
919 		if (!consumed_mask.match_buf)
920 			return -ENOMEM;
921 
922 		consumed_mask.match_sz = mask->match_sz;
923 		memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
924 		mlx5dr_ste_copy_param(matcher->match_criteria,
925 				      &matcher->mask, &consumed_mask, true);
926 
927 		/* Check that all mask data was consumed */
928 		for (i = 0; i < consumed_mask.match_sz; i++) {
929 			if (!((u8 *)consumed_mask.match_buf)[i])
930 				continue;
931 
932 			mlx5dr_dbg(dmn,
933 				   "Match param mask contains unsupported parameters\n");
934 			ret = -EOPNOTSUPP;
935 			break;
936 		}
937 
938 		kfree(consumed_mask.match_buf);
939 	}
940 
941 	return ret;
942 }
943 
944 static int dr_matcher_init(struct mlx5dr_matcher *matcher,
945 			   struct mlx5dr_match_parameters *mask)
946 {
947 	struct mlx5dr_table *tbl = matcher->tbl;
948 	struct mlx5dr_domain *dmn = tbl->dmn;
949 	int ret;
950 
951 	ret = dr_matcher_copy_param(matcher, mask);
952 	if (ret)
953 		return ret;
954 
955 	switch (dmn->type) {
956 	case MLX5DR_DOMAIN_TYPE_NIC_RX:
957 		matcher->rx.nic_tbl = &tbl->rx;
958 		ret = dr_matcher_init_nic(matcher, &matcher->rx);
959 		break;
960 	case MLX5DR_DOMAIN_TYPE_NIC_TX:
961 		matcher->tx.nic_tbl = &tbl->tx;
962 		ret = dr_matcher_init_nic(matcher, &matcher->tx);
963 		break;
964 	case MLX5DR_DOMAIN_TYPE_FDB:
965 		matcher->rx.nic_tbl = &tbl->rx;
966 		matcher->tx.nic_tbl = &tbl->tx;
967 		ret = dr_matcher_init_fdb(matcher);
968 		break;
969 	default:
970 		WARN_ON(true);
971 		ret = -EINVAL;
972 	}
973 
974 	return ret;
975 }
976 
977 static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
978 {
979 	mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
980 	list_add(&matcher->list_node, &matcher->tbl->matcher_list);
981 	mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
982 }
983 
984 static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
985 {
986 	mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
987 	list_del(&matcher->list_node);
988 	mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
989 }
990 
/* Create a matcher on @tbl with the given priority, criteria bitmap and
 * match mask. Returns the new matcher, or NULL on any failure (this API
 * reports no error code to the caller).
 *
 * The table's refcount is taken up front so @tbl cannot disappear while
 * the matcher is being built, and is dropped again on failure.
 */
struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table *tbl,
		      u32 priority,
		      u8 match_criteria_enable,
		      struct mlx5dr_match_parameters *mask)
{
	struct mlx5dr_matcher *matcher;
	int ret;

	refcount_inc(&tbl->refcount);

	matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
	if (!matcher)
		goto dec_ref;

	matcher->tbl = tbl;
	matcher->prio = priority;
	matcher->match_criteria = match_criteria_enable;
	refcount_set(&matcher->refcount, 1);
	INIT_LIST_HEAD(&matcher->list_node);
	INIT_LIST_HEAD(&matcher->dbg_rule_list);

	/* Serialize matcher init against all other domain mutations */
	mlx5dr_domain_lock(tbl->dmn);

	ret = dr_matcher_init(matcher, mask);
	if (ret)
		goto free_matcher;

	dr_matcher_add_to_dbg_list(matcher);

	mlx5dr_domain_unlock(tbl->dmn);

	return matcher;

free_matcher:
	mlx5dr_domain_unlock(tbl->dmn);
	kfree(matcher);
dec_ref:
	refcount_dec(&tbl->refcount);
	return NULL;
}
1032 
/* Unlink a nic matcher from the table's matcher chain by re-pointing the
 * previous anchor hash table: either at the next matcher's start table,
 * or at the table's default miss address when this was the last matcher.
 *
 * @prev_nic_matcher/@next_nic_matcher may be NULL when the removed
 * matcher was first/last in the chain.
 */
static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
				     struct mlx5dr_table_rx_tx *nic_tbl,
				     struct mlx5dr_matcher_rx_tx *next_nic_matcher,
				     struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *prev_anchor;

	/* No previous matcher: the table's start anchor points at us */
	if (prev_nic_matcher)
		prev_anchor = prev_nic_matcher->e_anchor;
	else
		prev_anchor = nic_tbl->s_anchor;

	/* Connect previous anchor hash table to next matcher or to the default address */
	if (next_nic_matcher) {
		info.type = CONNECT_HIT;
		info.hit_next_htbl = next_nic_matcher->s_htbl;
		/* Keep the SW shadow of the STE chain in sync with HW */
		next_nic_matcher->s_htbl->pointing_ste = prev_anchor->ste_arr;
		prev_anchor->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
	} else {
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_tbl->default_icm_addr;
		prev_anchor->ste_arr[0].next_htbl = NULL;
	}

	/* Rewrite the anchor in ICM and send the update to HW */
	return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
						 &info, true);
}
1062 
1063 int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
1064 				       struct mlx5dr_matcher_rx_tx *nic_matcher)
1065 {
1066 	struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
1067 	struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
1068 	int ret;
1069 
1070 	/* If the nic matcher is not on its parent nic table list,
1071 	 * then it is detached - no need to disconnect it.
1072 	 */
1073 	if (list_empty(&nic_matcher->list_node))
1074 		return 0;
1075 
1076 	if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
1077 		next_nic_matcher = NULL;
1078 	else
1079 		next_nic_matcher = list_next_entry(nic_matcher, list_node);
1080 
1081 	if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
1082 		prev_nic_matcher = NULL;
1083 	else
1084 		prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
1085 
1086 	ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
1087 	if (ret)
1088 		return ret;
1089 
1090 	list_del_init(&nic_matcher->list_node);
1091 	return 0;
1092 }
1093 
1094 int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
1095 {
1096 	struct mlx5dr_table *tbl = matcher->tbl;
1097 
1098 	if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
1099 		return -EBUSY;
1100 
1101 	mlx5dr_domain_lock(tbl->dmn);
1102 
1103 	dr_matcher_remove_from_dbg_list(matcher);
1104 	dr_matcher_uninit(matcher);
1105 	refcount_dec(&matcher->tbl->refcount);
1106 
1107 	mlx5dr_domain_unlock(tbl->dmn);
1108 	kfree(matcher);
1109 
1110 	return 0;
1111 }
1112