1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2019 Solarflare Communications Inc. 5 * Copyright 2020-2022 Xilinx Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 as published 9 * by the Free Software Foundation, incorporated herein by reference. 10 */ 11 12 #include <net/pkt_cls.h> 13 #include "tc.h" 14 #include "tc_bindings.h" 15 #include "mae.h" 16 #include "ef100_rep.h" 17 #include "efx.h" 18 19 #define EFX_EFV_PF NULL 20 /* Look up the representor information (efv) for a device. 21 * May return NULL for the PF (us), or an error pointer for a device that 22 * isn't supported as a TC offload endpoint 23 */ 24 static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx, 25 struct net_device *dev) 26 { 27 struct efx_rep *efv; 28 29 if (!dev) 30 return ERR_PTR(-EOPNOTSUPP); 31 /* Is it us (the PF)? */ 32 if (dev == efx->net_dev) 33 return EFX_EFV_PF; 34 /* Is it an efx vfrep at all? */ 35 if (dev->netdev_ops != &efx_ef100_rep_netdev_ops) 36 return ERR_PTR(-EOPNOTSUPP); 37 /* Is it ours? We don't support TC rules that include another 38 * EF100's netdevices (not even on another port of the same NIC). 39 */ 40 efv = netdev_priv(dev); 41 if (efv->parent != efx) 42 return ERR_PTR(-EOPNOTSUPP); 43 return efv; 44 } 45 46 /* Convert a driver-internal vport ID into an external device (wire or VF) */ 47 static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv) 48 { 49 u32 mport; 50 51 if (IS_ERR(efv)) 52 return PTR_ERR(efv); 53 if (!efv) /* device is PF (us) */ 54 efx_mae_mport_wire(efx, &mport); 55 else /* device is repr */ 56 efx_mae_mport_mport(efx, efv->mport, &mport); 57 return mport; 58 } 59 60 static const struct rhashtable_params efx_tc_match_action_ht_params = { 61 .key_len = sizeof(unsigned long), 62 .key_offset = offsetof(struct efx_tc_flow_rule, cookie), 63 .head_offset = offsetof(struct efx_tc_flow_rule, linkage), 64 }; 65 66 static void efx_tc_free_action_set(struct efx_nic *efx, 67 struct efx_tc_action_set *act, bool in_hw) 68 { 69 /* Failure paths calling this on the 'running action' set in_hw=false, 70 * because if the alloc had succeeded we'd've put it in acts.list and 71 * not still have it in act. 72 */ 73 if (in_hw) { 74 efx_mae_free_action_set(efx, act->fw_id); 75 /* in_hw is true iff we are on an acts.list; make sure to 76 * remove ourselves from that list before we are freed. 77 */ 78 list_del(&act->list); 79 } 80 if (act->count) 81 efx_tc_flower_put_counter_index(efx, act->count); 82 kfree(act); 83 } 84 85 static void efx_tc_free_action_set_list(struct efx_nic *efx, 86 struct efx_tc_action_set_list *acts, 87 bool in_hw) 88 { 89 struct efx_tc_action_set *act, *next; 90 91 /* Failure paths set in_hw=false, because usually the acts didn't get 92 * to efx_mae_alloc_action_set_list(); if they did, the failure tree 93 * has a separate efx_mae_free_action_set_list() before calling us. 94 */ 95 if (in_hw) 96 efx_mae_free_action_set_list(efx, acts); 97 /* Any act that's on the list will be in_hw even if the list isn't */ 98 list_for_each_entry_safe(act, next, &acts->list, list) 99 efx_tc_free_action_set(efx, act, true); 100 /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */ 101 } 102 103 static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) 104 { 105 efx_mae_delete_rule(efx, rule->fw_id); 106 107 /* Release entries in subsidiary tables */ 108 efx_tc_free_action_set_list(efx, &rule->acts, true); 109 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 110 } 111 112 static void efx_tc_flow_free(void *ptr, void *arg) 113 { 114 struct efx_tc_flow_rule *rule = ptr; 115 struct efx_nic *efx = arg; 116 117 netif_err(efx, drv, efx->net_dev, 118 "tc rule %lx still present at teardown, removing\n", 119 rule->cookie); 120 121 efx_mae_delete_rule(efx, rule->fw_id); 122 123 /* Release entries in subsidiary tables */ 124 efx_tc_free_action_set_list(efx, &rule->acts, true); 125 126 kfree(rule); 127 } 128 129 /* Boilerplate for the simple 'copy a field' cases */ 130 #define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ 131 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) { \ 132 struct flow_match_##_type fm; \ 133 \ 134 flow_rule_match_##_tcget(rule, &fm); \ 135 match->value._field = fm.key->_tcfield; \ 136 match->mask._field = fm.mask->_tcfield; \ 137 } 138 #define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field) \ 139 _MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field) 140 #define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ 141 _MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field) 142 143 static int efx_tc_flower_parse_match(struct efx_nic *efx, 144 struct flow_rule *rule, 145 struct efx_tc_match *match, 146 struct netlink_ext_ack *extack) 147 { 148 struct flow_dissector *dissector = rule->match.dissector; 149 unsigned char ipv = 0; 150 151 /* Owing to internal TC infelicities, the IPV6_ADDRS key might be set 152 * even on IPv4 filters; so rather than relying on dissector->used_keys 153 * we check the addr_type in the CONTROL key. If we don't find it (or 154 * it's masked, which should never happen), we treat both IPV4_ADDRS 155 * and IPV6_ADDRS as absent. 156 */ 157 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 158 struct flow_match_control fm; 159 160 flow_rule_match_control(rule, &fm); 161 if (IS_ALL_ONES(fm.mask->addr_type)) 162 switch (fm.key->addr_type) { 163 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 164 ipv = 4; 165 break; 166 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 167 ipv = 6; 168 break; 169 default: 170 break; 171 } 172 173 if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) { 174 match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT; 175 match->mask.ip_frag = true; 176 } 177 if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) { 178 match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG; 179 match->mask.ip_firstfrag = true; 180 } 181 if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) { 182 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x", 183 fm.mask->flags); 184 return -EOPNOTSUPP; 185 } 186 } 187 if (dissector->used_keys & 188 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 189 BIT(FLOW_DISSECTOR_KEY_BASIC) | 190 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 191 BIT(FLOW_DISSECTOR_KEY_VLAN) | 192 BIT(FLOW_DISSECTOR_KEY_CVLAN) | 193 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 194 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 195 BIT(FLOW_DISSECTOR_KEY_PORTS) | 196 BIT(FLOW_DISSECTOR_KEY_TCP) | 197 BIT(FLOW_DISSECTOR_KEY_IP))) { 198 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x", 199 dissector->used_keys); 200 return -EOPNOTSUPP; 201 } 202 203 MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto); 204 /* Make sure we're IP if any L3/L4 keys used. */ 205 if (!IS_ALL_ONES(match->mask.eth_proto) || 206 !(match->value.eth_proto == htons(ETH_P_IP) || 207 match->value.eth_proto == htons(ETH_P_IPV6))) 208 if (dissector->used_keys & 209 (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 210 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 211 BIT(FLOW_DISSECTOR_KEY_PORTS) | 212 BIT(FLOW_DISSECTOR_KEY_IP) | 213 BIT(FLOW_DISSECTOR_KEY_TCP))) { 214 NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]", 215 dissector->used_keys); 216 return -EINVAL; 217 } 218 219 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 220 struct flow_match_vlan fm; 221 222 flow_rule_match_vlan(rule, &fm); 223 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { 224 match->value.vlan_proto[0] = fm.key->vlan_tpid; 225 match->mask.vlan_proto[0] = fm.mask->vlan_tpid; 226 match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 | 227 fm.key->vlan_id); 228 match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 | 229 fm.mask->vlan_id); 230 } 231 } 232 233 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 234 struct flow_match_vlan fm; 235 236 flow_rule_match_cvlan(rule, &fm); 237 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { 238 match->value.vlan_proto[1] = fm.key->vlan_tpid; 239 match->mask.vlan_proto[1] = fm.mask->vlan_tpid; 240 match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 | 241 fm.key->vlan_id); 242 match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 | 243 fm.mask->vlan_id); 244 } 245 } 246 247 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 248 struct flow_match_eth_addrs fm; 249 250 flow_rule_match_eth_addrs(rule, &fm); 251 ether_addr_copy(match->value.eth_saddr, fm.key->src); 252 ether_addr_copy(match->value.eth_daddr, fm.key->dst); 253 ether_addr_copy(match->mask.eth_saddr, fm.mask->src); 254 ether_addr_copy(match->mask.eth_daddr, fm.mask->dst); 255 } 256 257 MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto); 258 /* Make sure we're TCP/UDP if any L4 keys used. */ 259 if ((match->value.ip_proto != IPPROTO_UDP && 260 match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto)) 261 if (dissector->used_keys & 262 (BIT(FLOW_DISSECTOR_KEY_PORTS) | 263 BIT(FLOW_DISSECTOR_KEY_TCP))) { 264 NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp", 265 dissector->used_keys); 266 return -EINVAL; 267 } 268 MAP_KEY_AND_MASK(IP, ip, tos, ip_tos); 269 MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl); 270 if (ipv == 4) { 271 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip); 272 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip); 273 } 274 #ifdef CONFIG_IPV6 275 else if (ipv == 6) { 276 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6); 277 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6); 278 } 279 #endif 280 MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport); 281 MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport); 282 MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags); 283 284 return 0; 285 } 286 287 /* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */ 288 enum efx_tc_action_order { 289 EFX_TC_AO_VLAN_POP, 290 EFX_TC_AO_VLAN_PUSH, 291 EFX_TC_AO_COUNT, 292 EFX_TC_AO_DELIVER 293 }; 294 /* Determine whether we can add @new action without violating order */ 295 static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, 296 enum efx_tc_action_order new) 297 { 298 switch (new) { 299 case EFX_TC_AO_VLAN_POP: 300 if (act->vlan_pop >= 2) 301 return false; 302 /* If we've already pushed a VLAN, we can't then pop it; 303 * the hardware would instead try to pop an existing VLAN 304 * before pushing the new one. 305 */ 306 if (act->vlan_push) 307 return false; 308 fallthrough; 309 case EFX_TC_AO_VLAN_PUSH: 310 if (act->vlan_push >= 2) 311 return false; 312 fallthrough; 313 case EFX_TC_AO_COUNT: 314 if (act->count) 315 return false; 316 fallthrough; 317 case EFX_TC_AO_DELIVER: 318 return !act->deliver; 319 default: 320 /* Bad caller. Whatever they wanted to do, say they can't. */ 321 WARN_ON_ONCE(1); 322 return false; 323 } 324 } 325 326 static int efx_tc_flower_replace(struct efx_nic *efx, 327 struct net_device *net_dev, 328 struct flow_cls_offload *tc, 329 struct efx_rep *efv) 330 { 331 struct flow_rule *fr = flow_cls_offload_flow_rule(tc); 332 struct netlink_ext_ack *extack = tc->common.extack; 333 struct efx_tc_flow_rule *rule = NULL, *old; 334 struct efx_tc_action_set *act = NULL; 335 const struct flow_action_entry *fa; 336 struct efx_rep *from_efv, *to_efv; 337 struct efx_tc_match match; 338 s64 rc; 339 int i; 340 341 if (!tc_can_offload_extack(efx->net_dev, extack)) 342 return -EOPNOTSUPP; 343 if (WARN_ON(!efx->tc)) 344 return -ENETDOWN; 345 if (WARN_ON(!efx->tc->up)) 346 return -ENETDOWN; 347 348 from_efv = efx_tc_flower_lookup_efv(efx, net_dev); 349 if (IS_ERR(from_efv)) { 350 /* Might be a tunnel decap rule from an indirect block. 351 * Support for those not implemented yet. 352 */ 353 return -EOPNOTSUPP; 354 } 355 356 if (efv != from_efv) { 357 /* can't happen */ 358 NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)", 359 netdev_name(net_dev), efv ? "non-" : "", 360 from_efv ? "non-" : ""); 361 return -EINVAL; 362 } 363 364 /* Parse match */ 365 memset(&match, 0, sizeof(match)); 366 rc = efx_tc_flower_external_mport(efx, from_efv); 367 if (rc < 0) { 368 NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port"); 369 return rc; 370 } 371 match.value.ingress_port = rc; 372 match.mask.ingress_port = ~0; 373 rc = efx_tc_flower_parse_match(efx, fr, &match, extack); 374 if (rc) 375 return rc; 376 377 if (tc->common.chain_index) { 378 NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); 379 return -EOPNOTSUPP; 380 } 381 match.mask.recirc_id = 0xff; 382 383 rc = efx_mae_match_check_caps(efx, &match.mask, extack); 384 if (rc) 385 return rc; 386 387 rule = kzalloc(sizeof(*rule), GFP_USER); 388 if (!rule) 389 return -ENOMEM; 390 INIT_LIST_HEAD(&rule->acts.list); 391 rule->cookie = tc->cookie; 392 old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, 393 &rule->linkage, 394 efx_tc_match_action_ht_params); 395 if (old) { 396 netif_dbg(efx, drv, efx->net_dev, 397 "Already offloaded rule (cookie %lx)\n", tc->cookie); 398 rc = -EEXIST; 399 NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); 400 goto release; 401 } 402 403 /* Parse actions */ 404 act = kzalloc(sizeof(*act), GFP_USER); 405 if (!act) { 406 rc = -ENOMEM; 407 goto release; 408 } 409 410 flow_action_for_each(i, fa, &fr->action) { 411 struct efx_tc_action_set save; 412 u16 tci; 413 414 if (!act) { 415 /* more actions after a non-pipe action */ 416 NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action"); 417 rc = -EINVAL; 418 goto release; 419 } 420 421 if ((fa->id == FLOW_ACTION_REDIRECT || 422 fa->id == FLOW_ACTION_MIRRED || 423 fa->id == FLOW_ACTION_DROP) && fa->hw_stats) { 424 struct efx_tc_counter_index *ctr; 425 426 /* Currently the only actions that want stats are 427 * mirred and gact (ok, shot, trap, goto-chain), which 428 * means we want stats just before delivery. Also, 429 * note that tunnel_key set shouldn't change the length 430 * — it's only the subsequent mirred that does that, 431 * and the stats are taken _before_ the mirred action 432 * happens. 433 */ 434 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) { 435 /* All supported actions that count either steal 436 * (gact shot, mirred redirect) or clone act 437 * (mirred mirror), so we should never get two 438 * count actions on one action_set. 439 */ 440 NL_SET_ERR_MSG_MOD(extack, "Count-action conflict (can't happen)"); 441 rc = -EOPNOTSUPP; 442 goto release; 443 } 444 445 if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) { 446 NL_SET_ERR_MSG_FMT_MOD(extack, "hw_stats_type %u not supported (only 'delayed')", 447 fa->hw_stats); 448 rc = -EOPNOTSUPP; 449 goto release; 450 } 451 452 ctr = efx_tc_flower_get_counter_index(efx, tc->cookie, 453 EFX_TC_COUNTER_TYPE_AR); 454 if (IS_ERR(ctr)) { 455 rc = PTR_ERR(ctr); 456 NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter"); 457 goto release; 458 } 459 act->count = ctr; 460 } 461 462 switch (fa->id) { 463 case FLOW_ACTION_DROP: 464 rc = efx_mae_alloc_action_set(efx, act); 465 if (rc) { 466 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)"); 467 goto release; 468 } 469 list_add_tail(&act->list, &rule->acts.list); 470 act = NULL; /* end of the line */ 471 break; 472 case FLOW_ACTION_REDIRECT: 473 case FLOW_ACTION_MIRRED: 474 save = *act; 475 476 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) { 477 /* can't happen */ 478 rc = -EOPNOTSUPP; 479 NL_SET_ERR_MSG_MOD(extack, "Deliver action violates action order (can't happen)"); 480 goto release; 481 } 482 483 to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); 484 if (IS_ERR(to_efv)) { 485 NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch"); 486 rc = PTR_ERR(to_efv); 487 goto release; 488 } 489 rc = efx_tc_flower_external_mport(efx, to_efv); 490 if (rc < 0) { 491 NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port"); 492 goto release; 493 } 494 act->dest_mport = rc; 495 act->deliver = 1; 496 rc = efx_mae_alloc_action_set(efx, act); 497 if (rc) { 498 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)"); 499 goto release; 500 } 501 list_add_tail(&act->list, &rule->acts.list); 502 act = NULL; 503 if (fa->id == FLOW_ACTION_REDIRECT) 504 break; /* end of the line */ 505 /* Mirror, so continue on with saved act */ 506 save.count = NULL; 507 act = kzalloc(sizeof(*act), GFP_USER); 508 if (!act) { 509 rc = -ENOMEM; 510 goto release; 511 } 512 *act = save; 513 break; 514 case FLOW_ACTION_VLAN_POP: 515 if (act->vlan_push) { 516 act->vlan_push--; 517 } else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_POP)) { 518 act->vlan_pop++; 519 } else { 520 NL_SET_ERR_MSG_MOD(extack, 521 "More than two VLAN pops, or action order violated"); 522 rc = -EINVAL; 523 goto release; 524 } 525 break; 526 case FLOW_ACTION_VLAN_PUSH: 527 if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_PUSH)) { 528 rc = -EINVAL; 529 NL_SET_ERR_MSG_MOD(extack, 530 "More than two VLAN pushes, or action order violated"); 531 goto release; 532 } 533 tci = fa->vlan.vid & VLAN_VID_MASK; 534 tci |= fa->vlan.prio << VLAN_PRIO_SHIFT; 535 act->vlan_tci[act->vlan_push] = cpu_to_be16(tci); 536 act->vlan_proto[act->vlan_push] = fa->vlan.proto; 537 act->vlan_push++; 538 break; 539 default: 540 NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", 541 fa->id); 542 rc = -EOPNOTSUPP; 543 goto release; 544 } 545 } 546 547 if (act) { 548 /* Not shot/redirected, so deliver to default dest */ 549 if (from_efv == EFX_EFV_PF) 550 /* Rule applies to traffic from the wire, 551 * and default dest is thus the PF 552 */ 553 efx_mae_mport_uplink(efx, &act->dest_mport); 554 else 555 /* Representor, so rule applies to traffic from 556 * representee, and default dest is thus the rep. 557 * All reps use the same mport for delivery 558 */ 559 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, 560 &act->dest_mport); 561 act->deliver = 1; 562 rc = efx_mae_alloc_action_set(efx, act); 563 if (rc) { 564 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)"); 565 goto release; 566 } 567 list_add_tail(&act->list, &rule->acts.list); 568 act = NULL; /* Prevent double-free in error path */ 569 } 570 571 netif_dbg(efx, drv, efx->net_dev, 572 "Successfully parsed filter (cookie %lx)\n", 573 tc->cookie); 574 575 rule->match = match; 576 577 rc = efx_mae_alloc_action_set_list(efx, &rule->acts); 578 if (rc) { 579 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw"); 580 goto release; 581 } 582 rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC, 583 rule->acts.fw_id, &rule->fw_id); 584 if (rc) { 585 NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); 586 goto release_acts; 587 } 588 return 0; 589 590 release_acts: 591 efx_mae_free_action_set_list(efx, &rule->acts); 592 release: 593 /* We failed to insert the rule, so free up any entries we created in 594 * subsidiary tables. 595 */ 596 if (act) 597 efx_tc_free_action_set(efx, act, false); 598 if (rule) { 599 rhashtable_remove_fast(&efx->tc->match_action_ht, 600 &rule->linkage, 601 efx_tc_match_action_ht_params); 602 efx_tc_free_action_set_list(efx, &rule->acts, false); 603 } 604 kfree(rule); 605 return rc; 606 } 607 608 static int efx_tc_flower_destroy(struct efx_nic *efx, 609 struct net_device *net_dev, 610 struct flow_cls_offload *tc) 611 { 612 struct netlink_ext_ack *extack = tc->common.extack; 613 struct efx_tc_flow_rule *rule; 614 615 rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie, 616 efx_tc_match_action_ht_params); 617 if (!rule) { 618 /* Only log a message if we're the ingress device. Otherwise 619 * it's a foreign filter and we might just not have been 620 * interested (e.g. we might not have been the egress device 621 * either). 622 */ 623 if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev))) 624 netif_warn(efx, drv, efx->net_dev, 625 "Filter %lx not found to remove\n", tc->cookie); 626 NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules"); 627 return -ENOENT; 628 } 629 630 /* Remove it from HW */ 631 efx_tc_delete_rule(efx, rule); 632 /* Delete it from SW */ 633 rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage, 634 efx_tc_match_action_ht_params); 635 netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie); 636 kfree(rule); 637 return 0; 638 } 639 640 static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev, 641 struct flow_cls_offload *tc) 642 { 643 struct netlink_ext_ack *extack = tc->common.extack; 644 struct efx_tc_counter_index *ctr; 645 struct efx_tc_counter *cnt; 646 u64 packets, bytes; 647 648 ctr = efx_tc_flower_find_counter_index(efx, tc->cookie); 649 if (!ctr) { 650 /* See comment in efx_tc_flower_destroy() */ 651 if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev))) 652 if (net_ratelimit()) 653 netif_warn(efx, drv, efx->net_dev, 654 "Filter %lx not found for stats\n", 655 tc->cookie); 656 NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules"); 657 return -ENOENT; 658 } 659 if (WARN_ON(!ctr->cnt)) /* can't happen */ 660 return -EIO; 661 cnt = ctr->cnt; 662 663 spin_lock_bh(&cnt->lock); 664 /* Report only new pkts/bytes since last time TC asked */ 665 packets = cnt->packets; 666 bytes = cnt->bytes; 667 flow_stats_update(&tc->stats, bytes - cnt->old_bytes, 668 packets - cnt->old_packets, 0, cnt->touched, 669 FLOW_ACTION_HW_STATS_DELAYED); 670 cnt->old_packets = packets; 671 cnt->old_bytes = bytes; 672 spin_unlock_bh(&cnt->lock); 673 return 0; 674 } 675 676 int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev, 677 struct flow_cls_offload *tc, struct efx_rep *efv) 678 { 679 int rc; 680 681 if (!efx->tc) 682 return -EOPNOTSUPP; 683 684 mutex_lock(&efx->tc->mutex); 685 switch (tc->command) { 686 case FLOW_CLS_REPLACE: 687 rc = efx_tc_flower_replace(efx, net_dev, tc, efv); 688 break; 689 case FLOW_CLS_DESTROY: 690 rc = efx_tc_flower_destroy(efx, net_dev, tc); 691 break; 692 case FLOW_CLS_STATS: 693 rc = efx_tc_flower_stats(efx, net_dev, tc); 694 break; 695 default: 696 rc = -EOPNOTSUPP; 697 break; 698 } 699 mutex_unlock(&efx->tc->mutex); 700 return rc; 701 } 702 703 static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port, 704 u32 eg_port, struct efx_tc_flow_rule *rule) 705 { 706 struct efx_tc_action_set_list *acts = &rule->acts; 707 struct efx_tc_match *match = &rule->match; 708 struct efx_tc_action_set *act; 709 int rc; 710 711 match->value.ingress_port = ing_port; 712 match->mask.ingress_port = ~0; 713 act = kzalloc(sizeof(*act), GFP_KERNEL); 714 if (!act) 715 return -ENOMEM; 716 act->deliver = 1; 717 act->dest_mport = eg_port; 718 rc = efx_mae_alloc_action_set(efx, act); 719 if (rc) 720 goto fail1; 721 EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); 722 list_add_tail(&act->list, &acts->list); 723 rc = efx_mae_alloc_action_set_list(efx, acts); 724 if (rc) 725 goto fail2; 726 rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT, 727 acts->fw_id, &rule->fw_id); 728 if (rc) 729 goto fail3; 730 return 0; 731 fail3: 732 efx_mae_free_action_set_list(efx, acts); 733 fail2: 734 list_del(&act->list); 735 efx_mae_free_action_set(efx, act->fw_id); 736 fail1: 737 kfree(act); 738 return rc; 739 } 740 741 static int efx_tc_configure_default_rule_pf(struct efx_nic *efx) 742 { 743 struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf; 744 u32 ing_port, eg_port; 745 746 efx_mae_mport_uplink(efx, &ing_port); 747 efx_mae_mport_wire(efx, &eg_port); 748 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 749 } 750 751 static int efx_tc_configure_default_rule_wire(struct efx_nic *efx) 752 { 753 struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire; 754 u32 ing_port, eg_port; 755 756 efx_mae_mport_wire(efx, &ing_port); 757 efx_mae_mport_uplink(efx, &eg_port); 758 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 759 } 760 761 int efx_tc_configure_default_rule_rep(struct efx_rep *efv) 762 { 763 struct efx_tc_flow_rule *rule = &efv->dflt; 764 struct efx_nic *efx = efv->parent; 765 u32 ing_port, eg_port; 766 767 efx_mae_mport_mport(efx, efv->mport, &ing_port); 768 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); 769 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 770 } 771 772 void efx_tc_deconfigure_default_rule(struct efx_nic *efx, 773 struct efx_tc_flow_rule *rule) 774 { 775 if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) 776 efx_tc_delete_rule(efx, rule); 777 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 778 } 779 780 static int efx_tc_configure_rep_mport(struct efx_nic *efx) 781 { 782 u32 rep_mport_label; 783 int rc; 784 785 rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label); 786 if (rc) 787 return rc; 788 pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n", 789 efx->tc->reps_mport_id, rep_mport_label); 790 /* Use mport *selector* as vport ID */ 791 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, 792 &efx->tc->reps_mport_vport_id); 793 return 0; 794 } 795 796 static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx) 797 { 798 efx_mae_free_mport(efx, efx->tc->reps_mport_id); 799 efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL; 800 } 801 802 int efx_tc_insert_rep_filters(struct efx_nic *efx) 803 { 804 struct efx_filter_spec promisc, allmulti; 805 int rc; 806 807 if (efx->type->is_vf) 808 return 0; 809 if (!efx->tc) 810 return 0; 811 efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0); 812 efx_filter_set_uc_def(&promisc); 813 efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id); 814 rc = efx_filter_insert_filter(efx, &promisc, false); 815 if (rc < 0) 816 return rc; 817 efx->tc->reps_filter_uc = rc; 818 efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0); 819 efx_filter_set_mc_def(&allmulti); 820 efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id); 821 rc = efx_filter_insert_filter(efx, &allmulti, false); 822 if (rc < 0) 823 return rc; 824 efx->tc->reps_filter_mc = rc; 825 return 0; 826 } 827 828 void efx_tc_remove_rep_filters(struct efx_nic *efx) 829 { 830 if (efx->type->is_vf) 831 return; 832 if (!efx->tc) 833 return; 834 if (efx->tc->reps_filter_mc >= 0) 835 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc); 836 efx->tc->reps_filter_mc = -1; 837 if (efx->tc->reps_filter_uc >= 0) 838 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc); 839 efx->tc->reps_filter_uc = -1; 840 } 841 842 int efx_init_tc(struct efx_nic *efx) 843 { 844 int rc; 845 846 rc = efx_mae_get_caps(efx, efx->tc->caps); 847 if (rc) 848 return rc; 849 if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS) 850 /* Firmware supports some match fields the driver doesn't know 851 * about. Not fatal, unless any of those fields are required 852 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know. 853 */ 854 netif_warn(efx, probe, efx->net_dev, 855 "FW reports additional match fields %u\n", 856 efx->tc->caps->match_field_count); 857 if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) { 858 netif_err(efx, probe, efx->net_dev, 859 "Too few action prios supported (have %u, need %u)\n", 860 efx->tc->caps->action_prios, EFX_TC_PRIO__NUM); 861 return -EIO; 862 } 863 rc = efx_tc_configure_default_rule_pf(efx); 864 if (rc) 865 return rc; 866 rc = efx_tc_configure_default_rule_wire(efx); 867 if (rc) 868 return rc; 869 rc = efx_tc_configure_rep_mport(efx); 870 if (rc) 871 return rc; 872 efx->tc->up = true; 873 rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); 874 if (rc) 875 return rc; 876 return 0; 877 } 878 879 void efx_fini_tc(struct efx_nic *efx) 880 { 881 /* We can get called even if efx_init_struct_tc() failed */ 882 if (!efx->tc) 883 return; 884 if (efx->tc->up) 885 flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind); 886 efx_tc_deconfigure_rep_mport(efx); 887 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); 888 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); 889 efx->tc->up = false; 890 } 891 892 int efx_init_struct_tc(struct efx_nic *efx) 893 { 894 int rc; 895 896 if (efx->type->is_vf) 897 return 0; 898 899 efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL); 900 if (!efx->tc) 901 return -ENOMEM; 902 efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL); 903 if (!efx->tc->caps) { 904 rc = -ENOMEM; 905 goto fail_alloc_caps; 906 } 907 INIT_LIST_HEAD(&efx->tc->block_list); 908 909 mutex_init(&efx->tc->mutex); 910 init_waitqueue_head(&efx->tc->flush_wq); 911 rc = efx_tc_init_counters(efx); 912 if (rc < 0) 913 goto fail_counters; 914 rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params); 915 if (rc < 0) 916 goto fail_match_action_ht; 917 efx->tc->reps_filter_uc = -1; 918 efx->tc->reps_filter_mc = -1; 919 INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); 920 efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 921 INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); 922 efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 923 efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type; 924 return 0; 925 fail_match_action_ht: 926 efx_tc_destroy_counters(efx); 927 fail_counters: 928 mutex_destroy(&efx->tc->mutex); 929 kfree(efx->tc->caps); 930 fail_alloc_caps: 931 kfree(efx->tc); 932 efx->tc = NULL; 933 return rc; 934 } 935 936 void efx_fini_struct_tc(struct efx_nic *efx) 937 { 938 if (!efx->tc) 939 return; 940 941 mutex_lock(&efx->tc->mutex); 942 EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id != 943 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); 944 EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != 945 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); 946 rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free, 947 efx); 948 efx_tc_fini_counters(efx); 949 mutex_unlock(&efx->tc->mutex); 950 mutex_destroy(&efx->tc->mutex); 951 kfree(efx->tc->caps); 952 kfree(efx->tc); 953 efx->tc = NULL; 954 } 955