// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

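/* Translate the tc action list of a flower rule into mlxsw ACL rule
 * actions. A count action is always appended first so that the rule's
 * statistics can later be read by mlxsw_sp_flower_stats(). Supported
 * actions are gact ok/drop/trap/goto_chain, mirred egress redirect and
 * mirror, and vlan modification; any other action fails with -EOPNOTSUPP.
 */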
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio, extack);
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

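/* Copy the IPv4 source and destination addresses and their masks into
 * the corresponding 32-bit flex key elements.
 */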
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}

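/* An IPv6 address does not fit into a single flex key element, so each
 * 128-bit address is split into four 32-bit chunks, most significant
 * chunk first.
 */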
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}

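/* Add L4 source and destination port matches. Port keys are only
 * meaningful for TCP and UDP, so any other IP protocol is rejected.
 */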
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

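/* Add a TCP flags match. The key is rejected unless the rule also
 * matches on IP protocol TCP.
 */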
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

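/* Add TTL, ECN and DSCP matches. ECN occupies the two least
 * significant bits of the tos byte and DSCP the upper six bits; the
 * key is only valid for IPv4 and IPv6 packets.
 */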
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is the six-bit field above ECN, hence the shift by two */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

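/* Translate a flower classifier into mlxsw ACL rule info: reject any
 * dissector key the hardware cannot match on, convert each used key
 * into flex key elements and finally parse the action list.
 */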
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}

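/* Offload a flower rule: get the ruleset for the chain, create a rule
 * keyed by the flower cookie, parse the matches and actions into it,
 * commit it and install it in hardware.
 */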
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

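/* Remove an offloaded flower rule. The rule lookup may legitimately
 * fail, e.g. for rules that were never offloaded, in which case there
 * is nothing to tear down.
 */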
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

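/* Read the packet, byte and last-use counters of an offloaded rule and
 * propagate them to the flower classifier's actions.
 */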
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

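/* Create a chain template: parse the flower template into a dummy rule
 * info only to learn which flex key elements it uses, then get a
 * ruleset reference with that element usage. The reference is kept
 * until mlxsw_sp_flower_tmplt_destroy().
 */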
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

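/* Destroy a chain template: drop both the reference taken by the get
 * in this function and the one kept by mlxsw_sp_flower_tmplt_create().
 */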
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}