// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/log2.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_flow_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int police_act_count = 0;
	int sample_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;
	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
		/* Count action is inserted first */
		err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_flow_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  act->id, vid,
							  proto, prio, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_PRIORITY:
			err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
							      act->priority,
							      extack);
			if (err)
				return err;
			break;
		case FLOW_ACTION_MANGLE: {
			enum flow_action_mangle_base htype = act->mangle.htype;
			__be32 be_mask = (__force __be32) act->mangle.mask;
			__be32 be_val = (__force __be32) act->mangle.val;
			u32 offset = act->mangle.offset;
			u32 mask = be32_to_cpu(be_mask);
			u32 val = be32_to_cpu(be_val);

			err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
							    htype, offset,
							    mask, val, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_POLICE: {
			u32 burst;

			if (police_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
				return -EOPNOTSUPP;
			}

			/* The kernel might adjust the requested burst size so
			 * that it is not exactly a power of two. Re-adjust it
			 * here since the hardware only supports burst sizes
			 * that are a power of two.
			 */
			burst = roundup_pow_of_two(act->police.burst);
			err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
							    act->hw_index,
							    act->police.rate_bytes_ps,
							    burst, extack);
			if (err)
				return err;
			break;
		}
		case FLOW_ACTION_SAMPLE: {
			if (sample_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple sample actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_sample(mlxsw_sp, rulei,
							    block,
							    act->sample.psample_group,
							    act->sample.rate,
							    act->sample.trunc_size,
							    act->sample.truncate,
							    extack);
			if (err)
				return err;
			break;
		}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_flow_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei,
				       MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}

static int
mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_flow_block *block,
		      struct mlxsw_sp_acl_rule_info *rulei,
		      struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_flow_block_is_egress_bound(block) &&
		    match.mask->vlan_id) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}

static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
					   struct flow_cls_offload *f)
{
	bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
	unsigned int mall_min_prio;
	unsigned int mall_max_prio;
	int err;

	err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
				     &mall_min_prio, &mall_max_prio);
	if (err) {
		if (err == -ENOENT)
			/* No matchall filters installed on this chain. */
			return 0;
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}
	if (ingress && f->common.prio <= mall_min_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (!ingress && f->common.prio >= mall_max_prio) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}
	return 0;
}

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	err = mlxsw_sp_flower_mall_prio_check(block, f);
	if (err)
		return err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_flow_block *block,
			  struct flow_cls_offload *f)
{
	enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	u64 drops;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &drops, &lastuse, &used_hw_stats);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
			  used_hw_stats);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* put the reference to the ruleset kept in create */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_flow_block *block,
			     u32 chain_index, unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
					      chain_index,
					      MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		/* In case there are no flower rules, the caller
		 * receives -ENOENT to indicate there is no need
		 * to check the priorities.
		 */
		return PTR_ERR(ruleset);
	mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
	return 0;
}