// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

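/* Parse the flower rule's action list into mlxsw ACL actions.
 * A count action is implicitly prepended whenever the rule asks for
 * "any" or "immediate" HW stats, so the counters can later be read
 * back by mlxsw_sp_flower_stats().
 */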
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
	    act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid the block containing this rulei from
			 * being bound to ingress/egress in the future.
			 * An ingress rule blocks egress binding and
			 * vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid the block containing this rulei from
			 * being bound to egress in the future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		case FLOW_ACTION_PRIORITY:
			return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							       act->priority,
							       extack);
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

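/* The meta key matches on the ingress port. The ifindex from the key
 * is resolved to an mlxsw local port and encoded as an exact match on
 * MLXSW_AFK_ELEMENT_SRC_SYS_PORT; anything but a full ifindex mask or
 * a port belonging to this device is rejected.
 */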
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

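/* IPv4 addresses fit into a single 32-bit flex key element per
 * direction, while IPv6 addresses below are split across four 32-bit
 * elements, from bits 96-127 down to bits 0-31.
 */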
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

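/* L4 ports are only meaningful for TCP and UDP, so a PORTS key
 * combined with any other ip_proto is rejected rather than silently
 * ignored.
 */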
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

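/* The TCP flags key is 16 bits wide in the flow dissector; bits 9-11
 * (mask 0x0E00) are reserved in the TCP header, and masks touching
 * them are rejected.
 */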
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

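/* The IP key is decomposed into its TTL, ECN (low two bits of tos)
 * and DSCP (upper six bits of tos) components.
 */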
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

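/* Top-level translation of a flower classifier into rule info:
 * reject dissector keys the driver cannot offload, parse each
 * supported key and finally the action list.
 */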
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid the block containing this rulei from
		 * being bound to egress in the future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

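/* Entry point for FLOW_CLS_REPLACE. For illustration, a flower rule
 * such as (port name hypothetical):
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *      dst_ip 192.0.2.1 action drop
 *
 * arrives here, is parsed into rule info, committed and added to the
 * ruleset of the requested chain.
 */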
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

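/* Entry point for FLOW_CLS_DESTROY. Failures to find the ruleset or
 * the rule are silently ignored, since the rule may never have been
 * offloaded in the first place.
 */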
void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

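/* Entry point for FLOW_CLS_STATS: read packet/byte counters and the
 * last-use timestamp from the device and report them back via
 * flow_stats_update(), along with the HW stats type in use.
 */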
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

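/* Entry point for FLOW_CLS_TMPLT_CREATE: parse the template into a
 * throwaway rule info only to learn which key elements it uses, then
 * take a ruleset reference matching that element usage.
 */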
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset; it is put back in
	 * mlxsw_sp_flower_tmplt_destroy()
	 */
	return PTR_ERR_OR_ZERO(ruleset);
}

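/* Entry point for FLOW_CLS_TMPLT_DESTROY: drop the extra ruleset
 * reference kept by mlxsw_sp_flower_tmplt_create().
 */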
void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* Put both the reference taken by the lookup above and the
	 * reference kept in mlxsw_sp_flower_tmplt_create().
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}