// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP:
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
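
/* Match on the ingress port carried in the flower META key. The ifindex
 * must be fully masked and must name a port on the same mlxsw device as
 * the block the rule is installed on; it is then translated into the
 * device-local SRC_SYS_PORT key element.
 */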
static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}
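
/* L4 port keys are only meaningful together with an exact match on
 * ip_proto being TCP or UDP; anything else is rejected.
 */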
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}
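
/* Translate a flower classifier into driver rule info: reject dissector
 * keys the device cannot match on, convert each supported key into flex
 * key elements and finally parse the actions.
 */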
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}
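
/* Offload entry point for FLOW_CLS_REPLACE. This is what ultimately runs
 * for a hardware-offloaded flower filter, e.g. (illustrative command,
 * port name hypothetical):
 *
 *   tc filter add dev sw1p1 ingress protocol ip flower skip_sw \
 *	dst_ip 192.0.2.1 action drop
 *
 * The ruleset reference taken here is put again on both the success and
 * the error paths.
 */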
int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
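
/* Chain template handling: parsing the template rule yields the set of
 * key elements the chain will use, which is passed to the ruleset lookup
 * via rulei.values.elusage. The ruleset reference taken in create is
 * deliberately kept and only dropped by the extra put in destroy.
 */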
int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}