1 // SPDX-License-Identifier: GPL-2.0-only 2 /**************************************************************************** 3 * Driver for Solarflare network controllers and boards 4 * Copyright 2019 Solarflare Communications Inc. 5 * Copyright 2020-2022 Xilinx Inc. 6 * 7 * This program is free software; you can redistribute it and/or modify it 8 * under the terms of the GNU General Public License version 2 as published 9 * by the Free Software Foundation, incorporated herein by reference. 10 */ 11 12 #include <net/pkt_cls.h> 13 #include "tc.h" 14 #include "tc_bindings.h" 15 #include "mae.h" 16 #include "ef100_rep.h" 17 #include "efx.h" 18 19 #define EFX_EFV_PF NULL 20 /* Look up the representor information (efv) for a device. 21 * May return NULL for the PF (us), or an error pointer for a device that 22 * isn't supported as a TC offload endpoint 23 */ 24 static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx, 25 struct net_device *dev) 26 { 27 struct efx_rep *efv; 28 29 if (!dev) 30 return ERR_PTR(-EOPNOTSUPP); 31 /* Is it us (the PF)? */ 32 if (dev == efx->net_dev) 33 return EFX_EFV_PF; 34 /* Is it an efx vfrep at all? */ 35 if (dev->netdev_ops != &efx_ef100_rep_netdev_ops) 36 return ERR_PTR(-EOPNOTSUPP); 37 /* Is it ours? We don't support TC rules that include another 38 * EF100's netdevices (not even on another port of the same NIC). 39 */ 40 efv = netdev_priv(dev); 41 if (efv->parent != efx) 42 return ERR_PTR(-EOPNOTSUPP); 43 return efv; 44 } 45 46 /* Convert a driver-internal vport ID into an external device (wire or VF) */ 47 static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv) 48 { 49 u32 mport; 50 51 if (IS_ERR(efv)) 52 return PTR_ERR(efv); 53 if (!efv) /* device is PF (us) */ 54 efx_mae_mport_wire(efx, &mport); 55 else /* device is repr */ 56 efx_mae_mport_mport(efx, efv->mport, &mport); 57 return mport; 58 } 59 60 static const struct rhashtable_params efx_tc_match_action_ht_params = { 61 .key_len = sizeof(unsigned long), 62 .key_offset = offsetof(struct efx_tc_flow_rule, cookie), 63 .head_offset = offsetof(struct efx_tc_flow_rule, linkage), 64 }; 65 66 static void efx_tc_free_action_set(struct efx_nic *efx, 67 struct efx_tc_action_set *act, bool in_hw) 68 { 69 /* Failure paths calling this on the 'running action' set in_hw=false, 70 * because if the alloc had succeeded we'd've put it in acts.list and 71 * not still have it in act. 72 */ 73 if (in_hw) { 74 efx_mae_free_action_set(efx, act->fw_id); 75 /* in_hw is true iff we are on an acts.list; make sure to 76 * remove ourselves from that list before we are freed. 77 */ 78 list_del(&act->list); 79 } 80 kfree(act); 81 } 82 83 static void efx_tc_free_action_set_list(struct efx_nic *efx, 84 struct efx_tc_action_set_list *acts, 85 bool in_hw) 86 { 87 struct efx_tc_action_set *act, *next; 88 89 /* Failure paths set in_hw=false, because usually the acts didn't get 90 * to efx_mae_alloc_action_set_list(); if they did, the failure tree 91 * has a separate efx_mae_free_action_set_list() before calling us. 92 */ 93 if (in_hw) 94 efx_mae_free_action_set_list(efx, acts); 95 /* Any act that's on the list will be in_hw even if the list isn't */ 96 list_for_each_entry_safe(act, next, &acts->list, list) 97 efx_tc_free_action_set(efx, act, true); 98 /* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */ 99 } 100 101 static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) 102 { 103 efx_mae_delete_rule(efx, rule->fw_id); 104 105 /* Release entries in subsidiary tables */ 106 efx_tc_free_action_set_list(efx, &rule->acts, true); 107 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 108 } 109 110 static void efx_tc_flow_free(void *ptr, void *arg) 111 { 112 struct efx_tc_flow_rule *rule = ptr; 113 struct efx_nic *efx = arg; 114 115 netif_err(efx, drv, efx->net_dev, 116 "tc rule %lx still present at teardown, removing\n", 117 rule->cookie); 118 119 efx_mae_delete_rule(efx, rule->fw_id); 120 121 /* Release entries in subsidiary tables */ 122 efx_tc_free_action_set_list(efx, &rule->acts, true); 123 124 kfree(rule); 125 } 126 127 /* Boilerplate for the simple 'copy a field' cases */ 128 #define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ 129 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) { \ 130 struct flow_match_##_type fm; \ 131 \ 132 flow_rule_match_##_tcget(rule, &fm); \ 133 match->value._field = fm.key->_tcfield; \ 134 match->mask._field = fm.mask->_tcfield; \ 135 } 136 #define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field) \ 137 _MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field) 138 #define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field) \ 139 _MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field) 140 141 static int efx_tc_flower_parse_match(struct efx_nic *efx, 142 struct flow_rule *rule, 143 struct efx_tc_match *match, 144 struct netlink_ext_ack *extack) 145 { 146 struct flow_dissector *dissector = rule->match.dissector; 147 unsigned char ipv = 0; 148 149 /* Owing to internal TC infelicities, the IPV6_ADDRS key might be set 150 * even on IPv4 filters; so rather than relying on dissector->used_keys 151 * we check the addr_type in the CONTROL key. If we don't find it (or 152 * it's masked, which should never happen), we treat both IPV4_ADDRS 153 * and IPV6_ADDRS as absent. 154 */ 155 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { 156 struct flow_match_control fm; 157 158 flow_rule_match_control(rule, &fm); 159 if (IS_ALL_ONES(fm.mask->addr_type)) 160 switch (fm.key->addr_type) { 161 case FLOW_DISSECTOR_KEY_IPV4_ADDRS: 162 ipv = 4; 163 break; 164 case FLOW_DISSECTOR_KEY_IPV6_ADDRS: 165 ipv = 6; 166 break; 167 default: 168 break; 169 } 170 171 if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) { 172 match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT; 173 match->mask.ip_frag = true; 174 } 175 if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) { 176 match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG; 177 match->mask.ip_firstfrag = true; 178 } 179 if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) { 180 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x", 181 fm.mask->flags); 182 return -EOPNOTSUPP; 183 } 184 } 185 if (dissector->used_keys & 186 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | 187 BIT(FLOW_DISSECTOR_KEY_BASIC) | 188 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 189 BIT(FLOW_DISSECTOR_KEY_VLAN) | 190 BIT(FLOW_DISSECTOR_KEY_CVLAN) | 191 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 192 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 193 BIT(FLOW_DISSECTOR_KEY_PORTS) | 194 BIT(FLOW_DISSECTOR_KEY_TCP) | 195 BIT(FLOW_DISSECTOR_KEY_IP))) { 196 NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#x", 197 dissector->used_keys); 198 return -EOPNOTSUPP; 199 } 200 201 MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto); 202 /* Make sure we're IP if any L3/L4 keys used. */ 203 if (!IS_ALL_ONES(match->mask.eth_proto) || 204 !(match->value.eth_proto == htons(ETH_P_IP) || 205 match->value.eth_proto == htons(ETH_P_IPV6))) 206 if (dissector->used_keys & 207 (BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 208 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 209 BIT(FLOW_DISSECTOR_KEY_PORTS) | 210 BIT(FLOW_DISSECTOR_KEY_IP) | 211 BIT(FLOW_DISSECTOR_KEY_TCP))) { 212 NL_SET_ERR_MSG_FMT_MOD(extack, "L3/L4 flower keys %#x require protocol ipv[46]", 213 dissector->used_keys); 214 return -EINVAL; 215 } 216 217 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { 218 struct flow_match_vlan fm; 219 220 flow_rule_match_vlan(rule, &fm); 221 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { 222 match->value.vlan_proto[0] = fm.key->vlan_tpid; 223 match->mask.vlan_proto[0] = fm.mask->vlan_tpid; 224 match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 | 225 fm.key->vlan_id); 226 match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 | 227 fm.mask->vlan_id); 228 } 229 } 230 231 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 232 struct flow_match_vlan fm; 233 234 flow_rule_match_cvlan(rule, &fm); 235 if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) { 236 match->value.vlan_proto[1] = fm.key->vlan_tpid; 237 match->mask.vlan_proto[1] = fm.mask->vlan_tpid; 238 match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 | 239 fm.key->vlan_id); 240 match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 | 241 fm.mask->vlan_id); 242 } 243 } 244 245 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { 246 struct flow_match_eth_addrs fm; 247 248 flow_rule_match_eth_addrs(rule, &fm); 249 ether_addr_copy(match->value.eth_saddr, fm.key->src); 250 ether_addr_copy(match->value.eth_daddr, fm.key->dst); 251 ether_addr_copy(match->mask.eth_saddr, fm.mask->src); 252 ether_addr_copy(match->mask.eth_daddr, fm.mask->dst); 253 } 254 255 MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto); 256 /* Make sure we're TCP/UDP if any L4 keys used. */ 257 if ((match->value.ip_proto != IPPROTO_UDP && 258 match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto)) 259 if (dissector->used_keys & 260 (BIT(FLOW_DISSECTOR_KEY_PORTS) | 261 BIT(FLOW_DISSECTOR_KEY_TCP))) { 262 NL_SET_ERR_MSG_FMT_MOD(extack, "L4 flower keys %#x require ipproto udp or tcp", 263 dissector->used_keys); 264 return -EINVAL; 265 } 266 MAP_KEY_AND_MASK(IP, ip, tos, ip_tos); 267 MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl); 268 if (ipv == 4) { 269 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip); 270 MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip); 271 } 272 #ifdef CONFIG_IPV6 273 else if (ipv == 6) { 274 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6); 275 MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6); 276 } 277 #endif 278 MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport); 279 MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport); 280 MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags); 281 282 return 0; 283 } 284 285 static int efx_tc_flower_replace(struct efx_nic *efx, 286 struct net_device *net_dev, 287 struct flow_cls_offload *tc, 288 struct efx_rep *efv) 289 { 290 struct flow_rule *fr = flow_cls_offload_flow_rule(tc); 291 struct netlink_ext_ack *extack = tc->common.extack; 292 struct efx_tc_flow_rule *rule = NULL, *old; 293 struct efx_tc_action_set *act = NULL; 294 const struct flow_action_entry *fa; 295 struct efx_rep *from_efv, *to_efv; 296 struct efx_tc_match match; 297 s64 rc; 298 int i; 299 300 if (!tc_can_offload_extack(efx->net_dev, extack)) 301 return -EOPNOTSUPP; 302 if (WARN_ON(!efx->tc)) 303 return -ENETDOWN; 304 if (WARN_ON(!efx->tc->up)) 305 return -ENETDOWN; 306 307 from_efv = efx_tc_flower_lookup_efv(efx, net_dev); 308 if (IS_ERR(from_efv)) { 309 /* Might be a tunnel decap rule from an indirect block. 310 * Support for those not implemented yet. 311 */ 312 return -EOPNOTSUPP; 313 } 314 315 if (efv != from_efv) { 316 /* can't happen */ 317 NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)", 318 netdev_name(net_dev), efv ? "non-" : "", 319 from_efv ? "non-" : ""); 320 return -EINVAL; 321 } 322 323 /* Parse match */ 324 memset(&match, 0, sizeof(match)); 325 rc = efx_tc_flower_external_mport(efx, from_efv); 326 if (rc < 0) { 327 NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port"); 328 return rc; 329 } 330 match.value.ingress_port = rc; 331 match.mask.ingress_port = ~0; 332 rc = efx_tc_flower_parse_match(efx, fr, &match, extack); 333 if (rc) 334 return rc; 335 336 if (tc->common.chain_index) { 337 NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); 338 return -EOPNOTSUPP; 339 } 340 match.mask.recirc_id = 0xff; 341 342 rc = efx_mae_match_check_caps(efx, &match.mask, extack); 343 if (rc) 344 return rc; 345 346 rule = kzalloc(sizeof(*rule), GFP_USER); 347 if (!rule) 348 return -ENOMEM; 349 INIT_LIST_HEAD(&rule->acts.list); 350 rule->cookie = tc->cookie; 351 old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, 352 &rule->linkage, 353 efx_tc_match_action_ht_params); 354 if (old) { 355 netif_dbg(efx, drv, efx->net_dev, 356 "Already offloaded rule (cookie %lx)\n", tc->cookie); 357 rc = -EEXIST; 358 NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); 359 goto release; 360 } 361 362 /* Parse actions */ 363 act = kzalloc(sizeof(*act), GFP_USER); 364 if (!act) { 365 rc = -ENOMEM; 366 goto release; 367 } 368 369 flow_action_for_each(i, fa, &fr->action) { 370 struct efx_tc_action_set save; 371 372 if (!act) { 373 /* more actions after a non-pipe action */ 374 NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action"); 375 rc = -EINVAL; 376 goto release; 377 } 378 379 switch (fa->id) { 380 case FLOW_ACTION_DROP: 381 rc = efx_mae_alloc_action_set(efx, act); 382 if (rc) { 383 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)"); 384 goto release; 385 } 386 list_add_tail(&act->list, &rule->acts.list); 387 act = NULL; /* end of the line */ 388 break; 389 case FLOW_ACTION_REDIRECT: 390 case FLOW_ACTION_MIRRED: 391 save = *act; 392 to_efv = efx_tc_flower_lookup_efv(efx, fa->dev); 393 if (IS_ERR(to_efv)) { 394 NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch"); 395 rc = PTR_ERR(to_efv); 396 goto release; 397 } 398 rc = efx_tc_flower_external_mport(efx, to_efv); 399 if (rc < 0) { 400 NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port"); 401 goto release; 402 } 403 act->dest_mport = rc; 404 act->deliver = 1; 405 rc = efx_mae_alloc_action_set(efx, act); 406 if (rc) { 407 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)"); 408 goto release; 409 } 410 list_add_tail(&act->list, &rule->acts.list); 411 act = NULL; 412 if (fa->id == FLOW_ACTION_REDIRECT) 413 break; /* end of the line */ 414 /* Mirror, so continue on with saved act */ 415 act = kzalloc(sizeof(*act), GFP_USER); 416 if (!act) { 417 rc = -ENOMEM; 418 goto release; 419 } 420 *act = save; 421 break; 422 default: 423 NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u", 424 fa->id); 425 rc = -EOPNOTSUPP; 426 goto release; 427 } 428 } 429 430 if (act) { 431 /* Not shot/redirected, so deliver to default dest */ 432 if (from_efv == EFX_EFV_PF) 433 /* Rule applies to traffic from the wire, 434 * and default dest is thus the PF 435 */ 436 efx_mae_mport_uplink(efx, &act->dest_mport); 437 else 438 /* Representor, so rule applies to traffic from 439 * representee, and default dest is thus the rep. 440 * All reps use the same mport for delivery 441 */ 442 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, 443 &act->dest_mport); 444 act->deliver = 1; 445 rc = efx_mae_alloc_action_set(efx, act); 446 if (rc) { 447 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)"); 448 goto release; 449 } 450 list_add_tail(&act->list, &rule->acts.list); 451 act = NULL; /* Prevent double-free in error path */ 452 } 453 454 netif_dbg(efx, drv, efx->net_dev, 455 "Successfully parsed filter (cookie %lx)\n", 456 tc->cookie); 457 458 rule->match = match; 459 460 rc = efx_mae_alloc_action_set_list(efx, &rule->acts); 461 if (rc) { 462 NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw"); 463 goto release; 464 } 465 rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC, 466 rule->acts.fw_id, &rule->fw_id); 467 if (rc) { 468 NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw"); 469 goto release_acts; 470 } 471 return 0; 472 473 release_acts: 474 efx_mae_free_action_set_list(efx, &rule->acts); 475 release: 476 /* We failed to insert the rule, so free up any entries we created in 477 * subsidiary tables. 478 */ 479 if (act) 480 efx_tc_free_action_set(efx, act, false); 481 if (rule) { 482 rhashtable_remove_fast(&efx->tc->match_action_ht, 483 &rule->linkage, 484 efx_tc_match_action_ht_params); 485 efx_tc_free_action_set_list(efx, &rule->acts, false); 486 } 487 kfree(rule); 488 return rc; 489 } 490 491 static int efx_tc_flower_destroy(struct efx_nic *efx, 492 struct net_device *net_dev, 493 struct flow_cls_offload *tc) 494 { 495 struct netlink_ext_ack *extack = tc->common.extack; 496 struct efx_tc_flow_rule *rule; 497 498 rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie, 499 efx_tc_match_action_ht_params); 500 if (!rule) { 501 /* Only log a message if we're the ingress device. Otherwise 502 * it's a foreign filter and we might just not have been 503 * interested (e.g. we might not have been the egress device 504 * either). 505 */ 506 if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev))) 507 netif_warn(efx, drv, efx->net_dev, 508 "Filter %lx not found to remove\n", tc->cookie); 509 NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules"); 510 return -ENOENT; 511 } 512 513 /* Remove it from HW */ 514 efx_tc_delete_rule(efx, rule); 515 /* Delete it from SW */ 516 rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage, 517 efx_tc_match_action_ht_params); 518 netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie); 519 kfree(rule); 520 return 0; 521 } 522 523 int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev, 524 struct flow_cls_offload *tc, struct efx_rep *efv) 525 { 526 int rc; 527 528 if (!efx->tc) 529 return -EOPNOTSUPP; 530 531 mutex_lock(&efx->tc->mutex); 532 switch (tc->command) { 533 case FLOW_CLS_REPLACE: 534 rc = efx_tc_flower_replace(efx, net_dev, tc, efv); 535 break; 536 case FLOW_CLS_DESTROY: 537 rc = efx_tc_flower_destroy(efx, net_dev, tc); 538 break; 539 default: 540 rc = -EOPNOTSUPP; 541 break; 542 } 543 mutex_unlock(&efx->tc->mutex); 544 return rc; 545 } 546 547 static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port, 548 u32 eg_port, struct efx_tc_flow_rule *rule) 549 { 550 struct efx_tc_action_set_list *acts = &rule->acts; 551 struct efx_tc_match *match = &rule->match; 552 struct efx_tc_action_set *act; 553 int rc; 554 555 match->value.ingress_port = ing_port; 556 match->mask.ingress_port = ~0; 557 act = kzalloc(sizeof(*act), GFP_KERNEL); 558 if (!act) 559 return -ENOMEM; 560 act->deliver = 1; 561 act->dest_mport = eg_port; 562 rc = efx_mae_alloc_action_set(efx, act); 563 if (rc) 564 goto fail1; 565 EFX_WARN_ON_PARANOID(!list_empty(&acts->list)); 566 list_add_tail(&act->list, &acts->list); 567 rc = efx_mae_alloc_action_set_list(efx, acts); 568 if (rc) 569 goto fail2; 570 rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT, 571 acts->fw_id, &rule->fw_id); 572 if (rc) 573 goto fail3; 574 return 0; 575 fail3: 576 efx_mae_free_action_set_list(efx, acts); 577 fail2: 578 list_del(&act->list); 579 efx_mae_free_action_set(efx, act->fw_id); 580 fail1: 581 kfree(act); 582 return rc; 583 } 584 585 static int efx_tc_configure_default_rule_pf(struct efx_nic *efx) 586 { 587 struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf; 588 u32 ing_port, eg_port; 589 590 efx_mae_mport_uplink(efx, &ing_port); 591 efx_mae_mport_wire(efx, &eg_port); 592 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 593 } 594 595 static int efx_tc_configure_default_rule_wire(struct efx_nic *efx) 596 { 597 struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire; 598 u32 ing_port, eg_port; 599 600 efx_mae_mport_wire(efx, &ing_port); 601 efx_mae_mport_uplink(efx, &eg_port); 602 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 603 } 604 605 int efx_tc_configure_default_rule_rep(struct efx_rep *efv) 606 { 607 struct efx_tc_flow_rule *rule = &efv->dflt; 608 struct efx_nic *efx = efv->parent; 609 u32 ing_port, eg_port; 610 611 efx_mae_mport_mport(efx, efv->mport, &ing_port); 612 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port); 613 return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule); 614 } 615 616 void efx_tc_deconfigure_default_rule(struct efx_nic *efx, 617 struct efx_tc_flow_rule *rule) 618 { 619 if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL) 620 efx_tc_delete_rule(efx, rule); 621 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 622 } 623 624 static int efx_tc_configure_rep_mport(struct efx_nic *efx) 625 { 626 u32 rep_mport_label; 627 int rc; 628 629 rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label); 630 if (rc) 631 return rc; 632 pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n", 633 efx->tc->reps_mport_id, rep_mport_label); 634 /* Use mport *selector* as vport ID */ 635 efx_mae_mport_mport(efx, efx->tc->reps_mport_id, 636 &efx->tc->reps_mport_vport_id); 637 return 0; 638 } 639 640 static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx) 641 { 642 efx_mae_free_mport(efx, efx->tc->reps_mport_id); 643 efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL; 644 } 645 646 int efx_tc_insert_rep_filters(struct efx_nic *efx) 647 { 648 struct efx_filter_spec promisc, allmulti; 649 int rc; 650 651 if (efx->type->is_vf) 652 return 0; 653 if (!efx->tc) 654 return 0; 655 efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0); 656 efx_filter_set_uc_def(&promisc); 657 efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id); 658 rc = efx_filter_insert_filter(efx, &promisc, false); 659 if (rc < 0) 660 return rc; 661 efx->tc->reps_filter_uc = rc; 662 efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0); 663 efx_filter_set_mc_def(&allmulti); 664 efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id); 665 rc = efx_filter_insert_filter(efx, &allmulti, false); 666 if (rc < 0) 667 return rc; 668 efx->tc->reps_filter_mc = rc; 669 return 0; 670 } 671 672 void efx_tc_remove_rep_filters(struct efx_nic *efx) 673 { 674 if (efx->type->is_vf) 675 return; 676 if (!efx->tc) 677 return; 678 if (efx->tc->reps_filter_mc >= 0) 679 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc); 680 efx->tc->reps_filter_mc = -1; 681 if (efx->tc->reps_filter_uc >= 0) 682 efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc); 683 efx->tc->reps_filter_uc = -1; 684 } 685 686 int efx_init_tc(struct efx_nic *efx) 687 { 688 int rc; 689 690 rc = efx_mae_get_caps(efx, efx->tc->caps); 691 if (rc) 692 return rc; 693 if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS) 694 /* Firmware supports some match fields the driver doesn't know 695 * about. Not fatal, unless any of those fields are required 696 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know. 697 */ 698 netif_warn(efx, probe, efx->net_dev, 699 "FW reports additional match fields %u\n", 700 efx->tc->caps->match_field_count); 701 if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) { 702 netif_err(efx, probe, efx->net_dev, 703 "Too few action prios supported (have %u, need %u)\n", 704 efx->tc->caps->action_prios, EFX_TC_PRIO__NUM); 705 return -EIO; 706 } 707 rc = efx_tc_configure_default_rule_pf(efx); 708 if (rc) 709 return rc; 710 rc = efx_tc_configure_default_rule_wire(efx); 711 if (rc) 712 return rc; 713 rc = efx_tc_configure_rep_mport(efx); 714 if (rc) 715 return rc; 716 efx->tc->up = true; 717 rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx); 718 if (rc) 719 return rc; 720 return 0; 721 } 722 723 void efx_fini_tc(struct efx_nic *efx) 724 { 725 /* We can get called even if efx_init_struct_tc() failed */ 726 if (!efx->tc) 727 return; 728 if (efx->tc->up) 729 flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind); 730 efx_tc_deconfigure_rep_mport(efx); 731 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf); 732 efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire); 733 efx->tc->up = false; 734 } 735 736 int efx_init_struct_tc(struct efx_nic *efx) 737 { 738 int rc; 739 740 if (efx->type->is_vf) 741 return 0; 742 743 efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL); 744 if (!efx->tc) 745 return -ENOMEM; 746 efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL); 747 if (!efx->tc->caps) { 748 rc = -ENOMEM; 749 goto fail_alloc_caps; 750 } 751 INIT_LIST_HEAD(&efx->tc->block_list); 752 753 mutex_init(&efx->tc->mutex); 754 rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params); 755 if (rc < 0) 756 goto fail_match_action_ht; 757 efx->tc->reps_filter_uc = -1; 758 efx->tc->reps_filter_mc = -1; 759 INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list); 760 efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 761 INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list); 762 efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 763 return 0; 764 fail_match_action_ht: 765 mutex_destroy(&efx->tc->mutex); 766 kfree(efx->tc->caps); 767 fail_alloc_caps: 768 kfree(efx->tc); 769 efx->tc = NULL; 770 return rc; 771 } 772 773 void efx_fini_struct_tc(struct efx_nic *efx) 774 { 775 if (!efx->tc) 776 return; 777 778 mutex_lock(&efx->tc->mutex); 779 EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id != 780 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); 781 EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id != 782 MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL); 783 rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free, 784 efx); 785 mutex_unlock(&efx->tc->mutex); 786 mutex_destroy(&efx->tc->mutex); 787 kfree(efx->tc->caps); 788 kfree(efx->tc); 789 efx->tc = NULL; 790 } 791