// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
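			/* For example, a burst size the software layer has
			 * adjusted to 1500 bytes is programmed below as
			 * roundup_pow_of_two(1500) = 2048 bytes.
			 */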
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->police.index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}
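
/* Usage sketch (illustrative only; "swp1" stands for any mlxsw front-panel
 * port): a flower rule such as
 *
 *   tc filter add dev swp1 ingress protocol ip flower skip_sw \
 *      ip_proto tcp dst_port 80 action drop
 *
 * is delivered to this code through the flow_cls_offload callbacks. The
 * match keys and actions are parsed by mlxsw_sp_flower_parse() into a rule
 * info structure, and mlxsw_sp_flower_replace() programs the resulting rule
 * into the Spectrum ACL tables.
 */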