// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int sample_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
				return -EOPNOTSUPP;
			}

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->police.index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
			}
		case FLOW_ACTION_SAMPLE: {
			if (sample_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
							    block,
							    act->sample.psample_group,
							    act->sample.rate,
							    act->sample.trunc_size,
							    act->sample.truncate,
							    extack);
			if (err)
				return err;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
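	/* The destination address is likewise split into four 32-bit
	 * flex-key elements, mirroring the source address handling above.
	 */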
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

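/* Translate a flower classifier offload request into ACL rule info:
 * reject dissector keys that cannot be offloaded, encode the supported
 * match keys (meta, control, basic, Ethernet addresses, IPv4/IPv6
 * addresses, L4 ports, TCP flags, IP TTL/ECN/DSCP and VLAN) into
 * flex-key elements and finally parse the actions.
 */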
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}