// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats == FLOW_ACTION_HW_STATS_ANY ||
	    act->hw_stats == FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else if (act->hw_stats != FLOW_ACTION_HW_STATS_DISABLED) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}