// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP:
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;
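
	/* The ingress ifindex must be fully masked, since it is translated
	 * below into a match on exactly one local port of this device.
	 */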
	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
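	/* The flow dissector carries L4 ports in network byte order;
	 * convert to host byte order before programming the key.
	 */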
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
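		/* ETH_P_ALL means "match any ethertype"; normalize it to a
		 * zero key and mask so it does not constrain the lookup.
		 */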
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}
		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
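	/* Drop the ruleset reference taken by mlxsw_sp_acl_ruleset_get()
	 * at the start of this function.
	 */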
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}