/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

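/* Translate the tc actions attached to a flower filter into mlxsw ACL rule
 * actions. A counter action is always inserted first so that rule statistics
 * can later be reported back to tc; the remaining actions are appended in
 * list order. An unrecognized action rejects the whole rule with -EOPNOTSUPP,
 * so a filter is never silently offloaded with part of its behavior missing.
 */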
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   action, vid,
							   proto, prio);
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

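/* Match on IPv4 source/destination addresses. The addresses are passed as
 * byte buffers rather than u32 values so the key stays in network byte
 * order; they occupy the low 32 bits of the flex key IP address elements.
 */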
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}

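/* Match on IPv6 source/destination addresses. A 128-bit address does not
 * fit into a single flex key element, so each address is split into four
 * 32-bit chunks, most significant first (bits 96-127 down to bits 0-31),
 * mirroring the layout of the MLXSW_AFK_ELEMENT_{SRC,DST}_IP_* elements.
 */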
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}

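/* Match on L4 source/destination ports. A ports key is only accepted when
 * the rule also matches on ip_proto TCP or UDP; otherwise the port numbers
 * would be ambiguous, so the rule is rejected.
 */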
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

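/* Match on TCP flags (SYN, ACK, FIN, ...). Only meaningful when the rule
 * already matches on ip_proto TCP; any other protocol is rejected.
 */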
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

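/* Match on the IP TTL/hop limit and the TOS byte. The TOS byte is split
 * into its two components: ECN in the low two bits and DSCP in the high
 * six bits, so DSCP is extracted with a right shift by 2.
 */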
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	/* ECN is the low two bits of the TOS byte */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is the high six bits of the TOS byte: shift by 2, not 6
	 * (a shift by 6 would keep only two of the six DSCP bits).
	 */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

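/* Top-level match parser: reject filters that use dissector keys the
 * Spectrum ACL cannot express, then translate each supported key, and
 * finally hand the attached actions to mlxsw_sp_flower_parse_actions().
 */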
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts);
}

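/* Entry point for adding (or replacing) a flower filter. For illustration
 * only (the interface name is hypothetical), a rule such as:
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *           dst_ip 192.0.2.1 ip_proto tcp action drop
 *
 * arrives here via the driver's tc block callback. The ruleset for the
 * filter's chain is looked up (or created), a rule keyed by the tc cookie
 * is created inside it, the match and actions are parsed into it, and the
 * result is committed to hardware.
 */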
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

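/* Remove a previously offloaded filter. The ruleset or rule lookup may
 * fail, e.g. when the filter was never successfully offloaded; in that
 * case there is nothing to tear down and the function returns quietly.
 */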
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

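/* Report hardware counters for an offloaded filter back to tc. The
 * packet/byte counts and last-use timestamp come from the counter action
 * that mlxsw_sp_flower_parse_actions() inserted first.
 */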
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}