/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

/* Translate the tc actions attached to a flower filter into mlxsw ACL
 * rule actions. Any unsupported action fails the offload with
 * -EOPNOTSUPP.
 */
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts,
					 struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_mirror(a)) {
			struct net_device *out_dev = tcf_mirred_dev(a);

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			/* Do not return directly here; later actions in
			 * the list must still be processed.
			 */
			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  action, vid,
							  proto, prio, extack);
			if (err)
				return err;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

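/* Copy the IPv4 source and destination addresses and their masks into
 * the corresponding 32-bit flex key elements.
 */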
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &key->src,
				       (char *) &mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &key->dst,
				       (char *) &mask->dst, 4);
}

/* The 128-bit IPv6 addresses are split into four 32-bit flex key
 * elements, most significant word first.
 */
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &key->src.s6_addr[0x0],
				       &mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &key->src.s6_addr[0x4],
				       &mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &key->src.s6_addr[0x8],
				       &mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &key->src.s6_addr[0xC],
				       &mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &key->dst.s6_addr[0x0],
				       &mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &key->dst.s6_addr[0x4],
				       &mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &key->dst.s6_addr[0x8],
				       &mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &key->dst.s6_addr[0xC],
				       &mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

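/* Parse TTL/TOS matches. The TOS byte is split into its ECN component
 * (low two bits) and its DSCP component (high six bits), which map to
 * separate flex key elements.
 */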
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	/* DSCP is TOS shifted right by two, not six; shifting by six
	 * would discard four of the six DSCP bits.
	 */
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

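	/* MAC addresses are split into a 16-bit high part (bits 32-47)
	 * and a 32-bit low part (bits 0-31) in the flex keys.
	 */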
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       key->dst, mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       key->dst + 2, mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       key->src, mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       key->src + 2, mask->src + 2, 4);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
					     f->common.extack);
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

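/* Retrieve the packet, byte and last-use counters for the rule matching
 * the flower cookie and report them through tcf_exts_stats_update().
 */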
int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);
	/* keep the reference to the ruleset */
	return 0;
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* Two puts are needed: one to drop the reference taken by the
	 * lookup above and one to drop the reference kept in
	 * mlxsw_sp_flower_tmplt_create().
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}