// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/pkt_cls.h>
#include "tc.h"
#include "tc_bindings.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"

#define EFX_EFV_PF	NULL
/* Look up the representor information (efv) for a device.
 * May return NULL for the PF (us), or an error pointer for a device that
 * isn't supported as a TC offload endpoint
 */
static struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
						struct net_device *dev)
{
	struct efx_rep *efv;

	if (!dev)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it us (the PF)? */
	if (dev == efx->net_dev)
		return EFX_EFV_PF;
	/* Is it an efx vfrep at all? */
	if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it ours?  We don't support TC rules that include another
	 * EF100's netdevices (not even on another port of the same NIC).
	 */
	efv = netdev_priv(dev);
	if (efv->parent != efx)
		return ERR_PTR(-EOPNOTSUPP);
	return efv;
}

/* Convert a driver-internal vport ID into an external device (wire or VF) */
static s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
{
	u32 mport;

	if (IS_ERR(efv))
		return PTR_ERR(efv);
	if (!efv) /* device is PF (us) */
		efx_mae_mport_wire(efx, &mport);
	else /* device is repr */
		efx_mae_mport_mport(efx, efv->mport, &mport);
	return mport;
}

static const struct rhashtable_params efx_tc_match_action_ht_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct efx_tc_flow_rule, cookie),
	.head_offset	= offsetof(struct efx_tc_flow_rule, linkage),
};

static void efx_tc_free_action_set(struct efx_nic *efx,
				   struct efx_tc_action_set *act, bool in_hw)
{
	/* Failure paths calling this on the 'running action' set in_hw=false,
	 * because if the alloc had succeeded we'd've put it in acts.list and
	 * not still have it in act.
	 */
	if (in_hw) {
		efx_mae_free_action_set(efx, act->fw_id);
		/* in_hw is true iff we are on an acts.list; make sure to
		 * remove ourselves from that list before we are freed.
		 */
		list_del(&act->list);
	}
	kfree(act);
}

static void efx_tc_free_action_set_list(struct efx_nic *efx,
					struct efx_tc_action_set_list *acts,
					bool in_hw)
{
	struct efx_tc_action_set *act, *next;

	/* Failure paths set in_hw=false, because usually the acts didn't get
	 * to efx_mae_alloc_action_set_list(); if they did, the failure tree
	 * has a separate efx_mae_free_action_set_list() before calling us.
	 */
	if (in_hw)
		efx_mae_free_action_set_list(efx, acts);
	/* Any act that's on the list will be in_hw even if the list isn't */
	list_for_each_entry_safe(act, next, &acts->list, list)
		efx_tc_free_action_set(efx, act, true);
	/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
}

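/* Remove a rule from hardware and release the action sets it owns.
 * The rule struct itself is not freed; callers do that (or it is embedded
 * in another structure, as for the default rules).
 */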
static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule)
{
	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

static void efx_tc_flow_free(void *ptr, void *arg)
{
	struct efx_tc_flow_rule *rule = ptr;
	struct efx_nic *efx = arg;

	netif_err(efx, drv, efx->net_dev,
		  "tc rule %lx still present at teardown, removing\n",
		  rule->cookie);

	efx_mae_delete_rule(efx, rule->fw_id);

	/* Release entries in subsidiary tables */
	efx_tc_free_action_set_list(efx, &rule->acts, true);

	kfree(rule);
}

static int efx_tc_flower_parse_match(struct efx_nic *efx,
				     struct flow_rule *rule,
				     struct efx_tc_match *match,
				     struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_control(rule, &fm);

		if (fm.mask->flags) {
			efx_tc_err(efx, "Unsupported match on control.flags %#x\n",
				   fm.mask->flags);
			NL_SET_ERR_MSG_MOD(extack, "Unsupported match on control.flags");
			return -EOPNOTSUPP;
		}
	}
	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC))) {
		efx_tc_err(efx, "Unsupported flower keys %#x\n", dissector->used_keys);
		NL_SET_ERR_MSG_MOD(extack, "Unsupported flower keys encountered");
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic fm;

		flow_rule_match_basic(rule, &fm);
		if (fm.mask->n_proto) {
			EFX_TC_ERR_MSG(efx, extack, "Unsupported eth_proto match\n");
			return -EOPNOTSUPP;
		}
		if (fm.mask->ip_proto) {
			EFX_TC_ERR_MSG(efx, extack, "Unsupported ip_proto match\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/* Translate a flower rule into an MAE match and a list of action sets, and
 * insert it into hardware.  Anything the MAE cannot represent is rejected
 * with -EOPNOTSUPP.
 */
static int efx_tc_flower_replace(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc,
				 struct efx_rep *efv)
{
	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule = NULL, *old;
	struct efx_tc_action_set *act = NULL;
	const struct flow_action_entry *fa;
	struct efx_rep *from_efv, *to_efv;
	struct efx_tc_match match;
	s64 rc;
	int i;

	if (!tc_can_offload_extack(efx->net_dev, extack))
		return -EOPNOTSUPP;
	if (WARN_ON(!efx->tc))
		return -ENETDOWN;
	if (WARN_ON(!efx->tc->up))
		return -ENETDOWN;

	from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
	if (IS_ERR(from_efv)) {
		/* Might be a tunnel decap rule from an indirect block.
		 * Support for those not implemented yet.
		 */
		return -EOPNOTSUPP;
	}

	if (efv != from_efv) {
		/* can't happen */
		efx_tc_err(efx, "for %s efv is %snull but from_efv is %snull\n",
			   netdev_name(net_dev), efv ? "non-" : "",
			   from_efv ? "non-" : "");
		if (efv)
			NL_SET_ERR_MSG_MOD(extack, "vfrep filter has PF net_dev (can't happen)");
		else
			NL_SET_ERR_MSG_MOD(extack, "PF filter has vfrep net_dev (can't happen)");
		return -EINVAL;
	}

	/* Parse match */
	memset(&match, 0, sizeof(match));
	rc = efx_tc_flower_external_mport(efx, from_efv);
	if (rc < 0) {
		EFX_TC_ERR_MSG(efx, extack, "Failed to identify ingress m-port");
		return rc;
	}
	match.value.ingress_port = rc;
	match.mask.ingress_port = ~0;
	rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
	if (rc)
		return rc;

	if (tc->common.chain_index) {
		EFX_TC_ERR_MSG(efx, extack, "No support for nonzero chain_index");
		return -EOPNOTSUPP;
	}
	match.mask.recirc_id = 0xff;

	rc = efx_mae_match_check_caps(efx, &match.mask, extack);
	if (rc)
		return rc;

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule)
		return -ENOMEM;
	INIT_LIST_HEAD(&rule->acts.list);
	rule->cookie = tc->cookie;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
						&rule->linkage,
						efx_tc_match_action_ht_params);
	if (old) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Already offloaded rule (cookie %lx)\n", tc->cookie);
		rc = -EEXIST;
		NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
		goto release;
	}

	/* Parse actions */
	act = kzalloc(sizeof(*act), GFP_USER);
	if (!act) {
		rc = -ENOMEM;
		goto release;
	}

	flow_action_for_each(i, fa, &fr->action) {
		struct efx_tc_action_set save;

		if (!act) {
			/* more actions after a non-pipe action */
			EFX_TC_ERR_MSG(efx, extack, "Action follows non-pipe action");
			rc = -EINVAL;
			goto release;
		}

		switch (fa->id) {
		case FLOW_ACTION_DROP:
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (drop)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL; /* end of the line */
			break;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			save = *act;
			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			if (IS_ERR(to_efv)) {
				EFX_TC_ERR_MSG(efx, extack, "Mirred egress device not on switch");
				rc = PTR_ERR(to_efv);
				goto release;
			}
			rc = efx_tc_flower_external_mport(efx, to_efv);
			if (rc < 0) {
				EFX_TC_ERR_MSG(efx, extack, "Failed to identify egress m-port");
				goto release;
			}
			act->dest_mport = rc;
			act->deliver = 1;
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (mirred)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL;
			if (fa->id == FLOW_ACTION_REDIRECT)
				break; /* end of the line */
			/* Mirror, so continue on with saved act */
			act = kzalloc(sizeof(*act), GFP_USER);
			if (!act) {
				rc = -ENOMEM;
				goto release;
			}
			*act = save;
			break;
		default:
			efx_tc_err(efx, "Unhandled action %u\n", fa->id);
			rc = -EOPNOTSUPP;
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			goto release;
		}
	}

	if (act) {
		/* Not shot/redirected, so deliver to default dest */
		if (from_efv == EFX_EFV_PF)
			/* Rule applies to traffic from the wire,
			 * and default dest is thus the PF
			 */
			efx_mae_mport_uplink(efx, &act->dest_mport);
		else
			/* Representor, so rule applies to traffic from
			 * representee, and default dest is thus the rep.
			 * All reps use the same mport for delivery
			 */
			efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
					    &act->dest_mport);
		act->deliver = 1;
		rc = efx_mae_alloc_action_set(efx, act);
		if (rc) {
			EFX_TC_ERR_MSG(efx, extack, "Failed to write action set to hw (deliver)");
			goto release;
		}
		list_add_tail(&act->list, &rule->acts.list);
		act = NULL; /* Prevent double-free in error path */
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed filter (cookie %lx)\n",
		  tc->cookie);

	rule->match = match;

	rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
	if (rc) {
		EFX_TC_ERR_MSG(efx, extack, "Failed to write action set list to hw");
		goto release;
	}
	rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
				 rule->acts.fw_id, &rule->fw_id);
	if (rc) {
		EFX_TC_ERR_MSG(efx, extack, "Failed to insert rule in hw");
		goto release_acts;
	}
	return 0;

release_acts:
	efx_mae_free_action_set_list(efx, &rule->acts);
release:
	/* We failed to insert the rule, so free up any entries we created in
	 * subsidiary tables.
	 */
	if (act)
		efx_tc_free_action_set(efx, act, false);
	if (rule) {
		rhashtable_remove_fast(&efx->tc->match_action_ht,
				       &rule->linkage,
				       efx_tc_match_action_ht_params);
		efx_tc_free_action_set_list(efx, &rule->acts, false);
	}
	kfree(rule);
	return rc;
}

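/* Tear down an offloaded flower rule: remove it from the MAE and from our
 * rhashtable.  An unknown cookie only rates a warning if we were the
 * ingress device; otherwise it is probably a foreign filter we were never
 * interested in.
 */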
static int efx_tc_flower_destroy(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule;

	rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
				      efx_tc_match_action_ht_params);
	if (!rule) {
		/* Only log a message if we're the ingress device.  Otherwise
		 * it's a foreign filter and we might just not have been
		 * interested (e.g. we might not have been the egress device
		 * either).
		 */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			netif_warn(efx, drv, efx->net_dev,
				   "Filter %lx not found to remove\n", tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}

	/* Remove it from HW */
	efx_tc_delete_rule(efx, rule);
	/* Delete it from SW */
	rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
			       efx_tc_match_action_ht_params);
	netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
	kfree(rule);
	return 0;
}

int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
		  struct flow_cls_offload *tc, struct efx_rep *efv)
{
	int rc;

	if (!efx->tc)
		return -EOPNOTSUPP;

	mutex_lock(&efx->tc->mutex);
	switch (tc->command) {
	case FLOW_CLS_REPLACE:
		rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
		break;
	case FLOW_CLS_DESTROY:
		rc = efx_tc_flower_destroy(efx, net_dev, tc);
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&efx->tc->mutex);
	return rc;
}

static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
					 u32 eg_port, struct efx_tc_flow_rule *rule)
{
	struct efx_tc_action_set_list *acts = &rule->acts;
	struct efx_tc_match *match = &rule->match;
	struct efx_tc_action_set *act;
	int rc;

	match->value.ingress_port = ing_port;
	match->mask.ingress_port = ~0;
	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
				 acts->fw_id, &rule->fw_id);
	if (rc)
		goto fail3;
	return 0;
fail3:
	efx_mae_free_action_set_list(efx, acts);
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}

static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
	u32 ing_port, eg_port;

	efx_mae_mport_uplink(efx, &ing_port);
	efx_mae_mport_wire(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
	u32 ing_port, eg_port;

	efx_mae_mport_wire(efx, &ing_port);
	efx_mae_mport_uplink(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
	struct efx_tc_flow_rule *rule = &efv->dflt;
	struct efx_nic *efx = efv->parent;
	u32 ing_port, eg_port;

	efx_mae_mport_mport(efx, efv->mport, &ing_port);
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
				     struct efx_tc_flow_rule *rule)
{
	if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
		efx_tc_delete_rule(efx, rule);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

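/* Allocate the m-port used as the delivery destination for all representor
 * default rules, and record the corresponding vport ID for use by the RX
 * filters that capture representor traffic.
 */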
static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
	u32 rep_mport_label;
	int rc;

	rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
		efx->tc->reps_mport_id, rep_mport_label);
	/* Use mport *selector* as vport ID */
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
			    &efx->tc->reps_mport_vport_id);
	return 0;
}

static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
	efx_mae_free_mport(efx, efx->tc->reps_mport_id);
	efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}

int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
	struct efx_filter_spec promisc, allmulti;
	int rc;

	if (efx->type->is_vf)
		return 0;
	if (!efx->tc)
		return 0;
	efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_uc_def(&promisc);
	efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &promisc, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_uc = rc;
	efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_mc_def(&allmulti);
	efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &allmulti, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_mc = rc;
	return 0;
}

void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
	if (efx->type->is_vf)
		return;
	if (!efx->tc)
		return;
	if (efx->tc->reps_filter_mc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
	efx->tc->reps_filter_mc = -1;
	if (efx->tc->reps_filter_uc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
	efx->tc->reps_filter_uc = -1;
}

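/* Bring up TC offload: query MAE capabilities, install the PF and wire
 * default rules, set up the representor m-port and register for indirect
 * block callbacks.
 */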
int efx_init_tc(struct efx_nic *efx)
{
	int rc;

	rc = efx_mae_get_caps(efx, efx->tc->caps);
	if (rc)
		return rc;
	if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
		/* Firmware supports some match fields the driver doesn't know
		 * about.  Not fatal, unless any of those fields are required
		 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
		 */
		netif_warn(efx, probe, efx->net_dev,
			   "FW reports additional match fields %u\n",
			   efx->tc->caps->match_field_count);
	if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
		netif_err(efx, probe, efx->net_dev,
			  "Too few action prios supported (have %u, need %u)\n",
			  efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
		return -EIO;
	}
	rc = efx_tc_configure_default_rule_pf(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_default_rule_wire(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_rep_mport(efx);
	if (rc)
		return rc;
	efx->tc->up = true;
	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
	if (rc)
		return rc;
	return 0;
}

void efx_fini_tc(struct efx_nic *efx)
{
	/* We can get called even if efx_init_struct_tc() failed */
	if (!efx->tc)
		return;
	if (efx->tc->up)
		flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
	efx_tc_deconfigure_rep_mport(efx);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
	efx->tc->up = false;
}

int efx_init_struct_tc(struct efx_nic *efx)
{
	int rc;

	if (efx->type->is_vf)
		return 0;

	efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
	if (!efx->tc)
		return -ENOMEM;
	efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
	if (!efx->tc->caps) {
		rc = -ENOMEM;
		goto fail_alloc_caps;
	}
	INIT_LIST_HEAD(&efx->tc->block_list);

	mutex_init(&efx->tc->mutex);
	rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
	if (rc < 0)
		goto fail_match_action_ht;
	efx->tc->reps_filter_uc = -1;
	efx->tc->reps_filter_mc = -1;
	INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
	efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
	efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	return 0;
fail_match_action_ht:
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
fail_alloc_caps:
	kfree(efx->tc);
	efx->tc = NULL;
	return rc;
}

void efx_fini_struct_tc(struct efx_nic *efx)
{
	if (!efx->tc)
		return;

	mutex_lock(&efx->tc->mutex);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
				    efx);
	mutex_unlock(&efx->tc->mutex);
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
	kfree(efx->tc);
	efx->tc = NULL;
}