// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <net/pkt_cls.h>
#include <net/vxlan.h>
#include <net/geneve.h>
#include "tc.h"
#include "tc_bindings.h"
#include "tc_encap_actions.h"
#include "mae.h"
#include "ef100_rep.h"
#include "efx.h"

enum efx_encap_type efx_tc_indr_netdev_type(struct net_device *net_dev)
{
	if (netif_is_vxlan(net_dev))
		return EFX_ENCAP_TYPE_VXLAN;
	if (netif_is_geneve(net_dev))
		return EFX_ENCAP_TYPE_GENEVE;

	return EFX_ENCAP_TYPE_NONE;
}

#define EFX_EFV_PF	NULL
/* Look up the representor information (efv) for a device.
 * May return NULL for the PF (us), or an error pointer for a device that
 * isn't supported as a TC offload endpoint
 */
struct efx_rep *efx_tc_flower_lookup_efv(struct efx_nic *efx,
					 struct net_device *dev)
{
	struct efx_rep *efv;

	if (!dev)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it us (the PF)? */
	if (dev == efx->net_dev)
		return EFX_EFV_PF;
	/* Is it an efx vfrep at all? */
	if (dev->netdev_ops != &efx_ef100_rep_netdev_ops)
		return ERR_PTR(-EOPNOTSUPP);
	/* Is it ours? We don't support TC rules that include another
	 * EF100's netdevices (not even on another port of the same NIC).
	 */
	efv = netdev_priv(dev);
	if (efv->parent != efx)
		return ERR_PTR(-EOPNOTSUPP);
	return efv;
}

/* Convert a driver-internal vport ID into an internal device (PF or VF) */
static s64 efx_tc_flower_internal_mport(struct efx_nic *efx, struct efx_rep *efv)
{
	u32 mport;

	if (IS_ERR(efv))
		return PTR_ERR(efv);
	if (!efv) /* device is PF (us) */
		efx_mae_mport_uplink(efx, &mport);
	else /* device is repr */
		efx_mae_mport_mport(efx, efv->mport, &mport);
	return mport;
}

/* Convert a driver-internal vport ID into an external device (wire or VF) */
s64 efx_tc_flower_external_mport(struct efx_nic *efx, struct efx_rep *efv)
{
	u32 mport;

	if (IS_ERR(efv))
		return PTR_ERR(efv);
	if (!efv) /* device is PF (us) */
		efx_mae_mport_wire(efx, &mport);
	else /* device is repr */
		efx_mae_mport_mport(efx, efv->mport, &mport);
	return mport;
}

static const struct rhashtable_params efx_tc_encap_match_ht_params = {
	.key_len	= offsetof(struct efx_tc_encap_match, linkage),
	.key_offset	= 0,
	.head_offset	= offsetof(struct efx_tc_encap_match, linkage),
};

static const struct rhashtable_params efx_tc_match_action_ht_params = {
	.key_len	= sizeof(unsigned long),
	.key_offset	= offsetof(struct efx_tc_flow_rule, cookie),
	.head_offset	= offsetof(struct efx_tc_flow_rule, linkage),
};

static void efx_tc_free_action_set(struct efx_nic *efx,
				   struct efx_tc_action_set *act, bool in_hw)
{
	/* Failure paths calling this on the 'cursor' action set in_hw=false,
	 * because if the alloc had succeeded we'd've put it in acts.list and
	 * not still have it in act.
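	 * (Conversely, efx_tc_free_action_set_list() below passes in_hw=true
	 * for every act it finds on a list.)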
	 */
	if (in_hw) {
		efx_mae_free_action_set(efx, act->fw_id);
		/* in_hw is true iff we are on an acts.list; make sure to
		 * remove ourselves from that list before we are freed.
		 */
		list_del(&act->list);
	}
	if (act->count) {
		spin_lock_bh(&act->count->cnt->lock);
		if (!list_empty(&act->count_user))
			list_del(&act->count_user);
		spin_unlock_bh(&act->count->cnt->lock);
		efx_tc_flower_put_counter_index(efx, act->count);
	}
	if (act->encap_md) {
		list_del(&act->encap_user);
		efx_tc_flower_release_encap_md(efx, act->encap_md);
	}
	kfree(act);
}

static void efx_tc_free_action_set_list(struct efx_nic *efx,
					struct efx_tc_action_set_list *acts,
					bool in_hw)
{
	struct efx_tc_action_set *act, *next;

	/* Failure paths set in_hw=false, because usually the acts didn't get
	 * to efx_mae_alloc_action_set_list(); if they did, the failure tree
	 * has a separate efx_mae_free_action_set_list() before calling us.
	 */
	if (in_hw)
		efx_mae_free_action_set_list(efx, acts);
	/* Any act that's on the list will be in_hw even if the list isn't */
	list_for_each_entry_safe(act, next, &acts->list, list)
		efx_tc_free_action_set(efx, act, true);
	/* Don't kfree, as acts is embedded inside a struct efx_tc_flow_rule */
}

/* Boilerplate for the simple 'copy a field' cases */
#define _MAP_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field)	\
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_##_name)) {		\
	struct flow_match_##_type fm;					\
									\
	flow_rule_match_##_tcget(rule, &fm);				\
	match->value._field = fm.key->_tcfield;				\
	match->mask._field = fm.mask->_tcfield;				\
}
#define MAP_KEY_AND_MASK(_name, _type, _tcfield, _field)	\
	_MAP_KEY_AND_MASK(_name, _type, _type, _tcfield, _field)
#define MAP_ENC_KEY_AND_MASK(_name, _type, _tcget, _tcfield, _field)	\
	_MAP_KEY_AND_MASK(ENC_##_name, _type, _tcget, _tcfield, _field)

static int efx_tc_flower_parse_match(struct efx_nic *efx,
				     struct flow_rule *rule,
				     struct efx_tc_match *match,
				     struct netlink_ext_ack *extack)
{
	struct flow_dissector *dissector = rule->match.dissector;
	unsigned char ipv = 0;

	/* Owing to internal TC infelicities, the IPV6_ADDRS key might be set
	 * even on IPv4 filters; so rather than relying on dissector->used_keys
	 * we check the addr_type in the CONTROL key. If we don't find it (or
	 * it's masked, which should never happen), we treat both IPV4_ADDRS
	 * and IPV6_ADDRS as absent.
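	 * (In that case ipv stays 0 below, so neither the IPV4_ADDRS nor the
	 * IPV6_ADDRS mapping is applied.)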
	 */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_control(rule, &fm);
		if (IS_ALL_ONES(fm.mask->addr_type))
			switch (fm.key->addr_type) {
			case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
				ipv = 4;
				break;
			case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
				ipv = 6;
				break;
			default:
				break;
			}

		if (fm.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			match->value.ip_frag = fm.key->flags & FLOW_DIS_IS_FRAGMENT;
			match->mask.ip_frag = true;
		}
		if (fm.mask->flags & FLOW_DIS_FIRST_FRAG) {
			match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
			match->mask.ip_firstfrag = true;
		}
		if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
					       fm.mask->flags);
			return -EOPNOTSUPP;
		}
	}
	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported flower keys %#llx",
				       dissector->used_keys);
		return -EOPNOTSUPP;
	}

	MAP_KEY_AND_MASK(BASIC, basic, n_proto, eth_proto);
	/* Make sure we're IP if any L3/L4 keys used.
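	 * (That is, eth_proto must be an exact match on ETH_P_IP or
	 * ETH_P_IPV6.)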
	 */
	if (!IS_ALL_ONES(match->mask.eth_proto) ||
	    !(match->value.eth_proto == htons(ETH_P_IP) ||
	      match->value.eth_proto == htons(ETH_P_IPV6)))
		if (dissector->used_keys &
		    (BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
		     BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
		     BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
		     BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
		     BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "L3/L4 flower keys %#llx require protocol ipv[46]",
					       dissector->used_keys);
			return -EINVAL;
		}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan fm;

		flow_rule_match_vlan(rule, &fm);
		if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
			match->value.vlan_proto[0] = fm.key->vlan_tpid;
			match->mask.vlan_proto[0] = fm.mask->vlan_tpid;
			match->value.vlan_tci[0] = cpu_to_be16(fm.key->vlan_priority << 13 |
							       fm.key->vlan_id);
			match->mask.vlan_tci[0] = cpu_to_be16(fm.mask->vlan_priority << 13 |
							      fm.mask->vlan_id);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan fm;

		flow_rule_match_cvlan(rule, &fm);
		if (fm.mask->vlan_id || fm.mask->vlan_priority || fm.mask->vlan_tpid) {
			match->value.vlan_proto[1] = fm.key->vlan_tpid;
			match->mask.vlan_proto[1] = fm.mask->vlan_tpid;
			match->value.vlan_tci[1] = cpu_to_be16(fm.key->vlan_priority << 13 |
							       fm.key->vlan_id);
			match->mask.vlan_tci[1] = cpu_to_be16(fm.mask->vlan_priority << 13 |
							      fm.mask->vlan_id);
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs fm;

		flow_rule_match_eth_addrs(rule, &fm);
		ether_addr_copy(match->value.eth_saddr, fm.key->src);
		ether_addr_copy(match->value.eth_daddr, fm.key->dst);
		ether_addr_copy(match->mask.eth_saddr, fm.mask->src);
		ether_addr_copy(match->mask.eth_daddr, fm.mask->dst);
	}

	MAP_KEY_AND_MASK(BASIC, basic, ip_proto, ip_proto);
	/* Make sure we're TCP/UDP if any L4 keys used.
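	 * (Likewise, ip_proto must be an exact match on IPPROTO_TCP or
	 * IPPROTO_UDP.)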
	 */
	if ((match->value.ip_proto != IPPROTO_UDP &&
	     match->value.ip_proto != IPPROTO_TCP) || !IS_ALL_ONES(match->mask.ip_proto))
		if (dissector->used_keys &
		    (BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
		     BIT_ULL(FLOW_DISSECTOR_KEY_TCP))) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "L4 flower keys %#llx require ipproto udp or tcp",
					       dissector->used_keys);
			return -EINVAL;
		}
	MAP_KEY_AND_MASK(IP, ip, tos, ip_tos);
	MAP_KEY_AND_MASK(IP, ip, ttl, ip_ttl);
	if (ipv == 4) {
		MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, src, src_ip);
		MAP_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, dst, dst_ip);
	}
#ifdef CONFIG_IPV6
	else if (ipv == 6) {
		MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, src, src_ip6);
		MAP_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, dst, dst_ip6);
	}
#endif
	MAP_KEY_AND_MASK(PORTS, ports, src, l4_sport);
	MAP_KEY_AND_MASK(PORTS, ports, dst, l4_dport);
	MAP_KEY_AND_MASK(TCP, tcp, flags, tcp_flags);
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control fm;

		flow_rule_match_enc_control(rule, &fm);
		if (fm.mask->flags) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x",
					       fm.mask->flags);
			return -EOPNOTSUPP;
		}
		if (!IS_ALL_ONES(fm.mask->addr_type)) {
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)",
					       fm.mask->addr_type,
					       fm.key->addr_type);
			return -EOPNOTSUPP;
		}
		switch (fm.key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs,
					     src, enc_src_ip);
			MAP_ENC_KEY_AND_MASK(IPV4_ADDRS, ipv4_addrs, enc_ipv4_addrs,
					     dst, enc_dst_ip);
			break;
#ifdef CONFIG_IPV6
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs,
					     src, enc_src_ip6);
			MAP_ENC_KEY_AND_MASK(IPV6_ADDRS, ipv6_addrs, enc_ipv6_addrs,
					     dst, enc_dst_ip6);
			break;
#endif
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Unsupported enc addr_type %u (supported are IPv4, IPv6)",
					       fm.key->addr_type);
			return -EOPNOTSUPP;
		}
		MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, tos, enc_ip_tos);
		MAP_ENC_KEY_AND_MASK(IP, ip, enc_ip, ttl, enc_ip_ttl);
		MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, src, enc_sport);
		MAP_ENC_KEY_AND_MASK(PORTS, ports, enc_ports, dst, enc_dport);
		MAP_ENC_KEY_AND_MASK(KEYID, enc_keyid, enc_keyid, keyid, enc_keyid);
	} else if (dissector->used_keys &
		   (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
		    BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "Flower enc keys require enc_control (keys: %#llx)",
				       dissector->used_keys);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void efx_tc_flower_release_encap_match(struct efx_nic *efx,
					      struct efx_tc_encap_match *encap)
{
	int rc;

	if (!refcount_dec_and_test(&encap->ref))
		return; /* still in use */

	if (encap->type == EFX_TC_EM_DIRECT) {
		rc = efx_mae_unregister_encap_match(efx, encap);
		if (rc)
			/* Display message but carry on and remove entry from our
			 * SW tables, because there's not much we can do about it.
			 */
			netif_err(efx, drv, efx->net_dev,
				  "Failed to release encap match %#x, rc %d\n",
				  encap->fw_id, rc);
	}
	rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage,
			       efx_tc_encap_match_ht_params);
	if (encap->pseudo)
		efx_tc_flower_release_encap_match(efx, encap->pseudo);
	kfree(encap);
}

static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
					    struct efx_tc_match *match,
					    enum efx_encap_type type,
					    enum efx_tc_em_pseudo_type em_type,
					    u8 child_ip_tos_mask,
					    __be16 child_udp_sport_mask,
					    struct netlink_ext_ack *extack)
{
	struct efx_tc_encap_match *encap, *old, *pseudo = NULL;
	bool ipv6 = false;
	int rc;

	/* We require that the socket-defining fields (IP addrs and UDP dest
	 * port) are present and exact-match. Other fields may only be used
	 * if the field-set (and any masks) are the same for all encap
	 * matches on the same <sip,dip,dport> tuple; this is enforced by
	 * pseudo encap matches.
	 */
	if (match->mask.enc_dst_ip | match->mask.enc_src_ip) {
		if (!IS_ALL_ONES(match->mask.enc_dst_ip)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match is not exact on dst IP address");
			return -EOPNOTSUPP;
		}
		if (!IS_ALL_ONES(match->mask.enc_src_ip)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match is not exact on src IP address");
			return -EOPNOTSUPP;
		}
#ifdef CONFIG_IPV6
		if (!ipv6_addr_any(&match->mask.enc_dst_ip6) ||
		    !ipv6_addr_any(&match->mask.enc_src_ip6)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match on both IPv4 and IPv6, don't understand");
			return -EOPNOTSUPP;
		}
	} else {
		ipv6 = true;
		if (!efx_ipv6_addr_all_ones(&match->mask.enc_dst_ip6)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match is not exact on dst IP address");
			return -EOPNOTSUPP;
		}
		if (!efx_ipv6_addr_all_ones(&match->mask.enc_src_ip6)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match is not exact on src IP address");
			return -EOPNOTSUPP;
		}
#endif
	}
	if (!IS_ALL_ONES(match->mask.enc_dport)) {
		NL_SET_ERR_MSG_MOD(extack, "Egress encap match is not exact on dst UDP port");
		return -EOPNOTSUPP;
	}
	if (match->mask.enc_sport || match->mask.enc_ip_tos) {
		struct efx_tc_match pmatch = *match;

		if (em_type == EFX_TC_EM_PSEUDO_MASK) { /* can't happen */
			NL_SET_ERR_MSG_MOD(extack, "Bad recursion in egress encap match handler");
			return -EOPNOTSUPP;
		}
		pmatch.value.enc_ip_tos = 0;
		pmatch.mask.enc_ip_tos = 0;
		pmatch.value.enc_sport = 0;
		pmatch.mask.enc_sport = 0;
		rc = efx_tc_flower_record_encap_match(efx, &pmatch, type,
						      EFX_TC_EM_PSEUDO_MASK,
						      match->mask.enc_ip_tos,
						      match->mask.enc_sport,
						      extack);
		if (rc)
			return rc;
		pseudo = pmatch.encap;
	}
	if (match->mask.enc_ip_ttl) {
		NL_SET_ERR_MSG_MOD(extack, "Egress encap match on IP TTL not supported");
		rc = -EOPNOTSUPP;
		goto fail_pseudo;
	}

	rc = efx_mae_check_encap_match_caps(efx, ipv6, match->mask.enc_ip_tos,
					    match->mask.enc_sport, extack);
	if (rc)
		goto fail_pseudo;

	encap = kzalloc(sizeof(*encap), GFP_USER);
	if (!encap) {
		rc = -ENOMEM;
		goto fail_pseudo;
	}
	encap->src_ip = match->value.enc_src_ip;
	encap->dst_ip = match->value.enc_dst_ip;
#ifdef CONFIG_IPV6
	encap->src_ip6 = match->value.enc_src_ip6;
	encap->dst_ip6 = match->value.enc_dst_ip6;
#endif
	encap->udp_dport = match->value.enc_dport;
	encap->tun_type = type;
	encap->ip_tos = match->value.enc_ip_tos;
	encap->ip_tos_mask = match->mask.enc_ip_tos;
	encap->child_ip_tos_mask = child_ip_tos_mask;
	encap->udp_sport = match->value.enc_sport;
	encap->udp_sport_mask = match->mask.enc_sport;
	encap->child_udp_sport_mask = child_udp_sport_mask;
	encap->type = em_type;
	encap->pseudo = pseudo;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->encap_match_ht,
						&encap->linkage,
						efx_tc_encap_match_ht_params);
	if (old) {
		/* don't need our new entry */
		kfree(encap);
		if (pseudo) /* don't need our new pseudo either */
			efx_tc_flower_release_encap_match(efx, pseudo);
		/* check old and new em_types are compatible */
		switch (old->type) {
		case EFX_TC_EM_DIRECT:
			/* old EM is in hardware, so mustn't overlap with a
			 * pseudo, but may be shared with another direct EM
			 */
			if (em_type == EFX_TC_EM_DIRECT)
				break;
			NL_SET_ERR_MSG_MOD(extack, "Pseudo encap match conflicts with existing direct entry");
			return -EEXIST;
		case EFX_TC_EM_PSEUDO_MASK:
			/* old EM is protecting a ToS- or src port-qualified
			 * filter, so may only be shared with another pseudo
			 * for the same ToS and src port masks.
			 */
			if (em_type != EFX_TC_EM_PSEUDO_MASK) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "%s encap match conflicts with existing pseudo(MASK) entry",
						       em_type ? "Pseudo" : "Direct");
				return -EEXIST;
			}
			if (child_ip_tos_mask != old->child_ip_tos_mask) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
						       child_ip_tos_mask,
						       old->child_ip_tos_mask);
				return -EEXIST;
			}
			if (child_udp_sport_mask != old->child_udp_sport_mask) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
						       child_udp_sport_mask,
						       old->child_udp_sport_mask);
				return -EEXIST;
			}
			break;
		default: /* Unrecognised pseudo-type. Just say no */
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "%s encap match conflicts with existing pseudo(%d) entry",
					       em_type ? "Pseudo" : "Direct",
"Pseudo" : "Direct", 542 old->type); 543 return -EEXIST; 544 } 545 /* check old and new tun_types are compatible */ 546 if (old->tun_type != type) { 547 NL_SET_ERR_MSG_FMT_MOD(extack, 548 "Egress encap match with conflicting tun_type %u != %u", 549 old->tun_type, type); 550 return -EEXIST; 551 } 552 if (!refcount_inc_not_zero(&old->ref)) 553 return -EAGAIN; 554 /* existing entry found */ 555 encap = old; 556 } else { 557 if (em_type == EFX_TC_EM_DIRECT) { 558 rc = efx_mae_register_encap_match(efx, encap); 559 if (rc) { 560 NL_SET_ERR_MSG_MOD(extack, "Failed to record egress encap match in HW"); 561 goto fail; 562 } 563 } 564 refcount_set(&encap->ref, 1); 565 } 566 match->encap = encap; 567 return 0; 568 fail: 569 rhashtable_remove_fast(&efx->tc->encap_match_ht, &encap->linkage, 570 efx_tc_encap_match_ht_params); 571 kfree(encap); 572 fail_pseudo: 573 if (pseudo) 574 efx_tc_flower_release_encap_match(efx, pseudo); 575 return rc; 576 } 577 578 static void efx_tc_delete_rule(struct efx_nic *efx, struct efx_tc_flow_rule *rule) 579 { 580 efx_mae_delete_rule(efx, rule->fw_id); 581 582 /* Release entries in subsidiary tables */ 583 efx_tc_free_action_set_list(efx, &rule->acts, true); 584 if (rule->match.encap) 585 efx_tc_flower_release_encap_match(efx, rule->match.encap); 586 rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL; 587 } 588 589 static const char *efx_tc_encap_type_name(enum efx_encap_type typ) 590 { 591 switch (typ) { 592 case EFX_ENCAP_TYPE_NONE: 593 return "none"; 594 case EFX_ENCAP_TYPE_VXLAN: 595 return "vxlan"; 596 case EFX_ENCAP_TYPE_GENEVE: 597 return "geneve"; 598 default: 599 pr_warn_once("Unknown efx_encap_type %d encountered\n", typ); 600 return "unknown"; 601 } 602 } 603 604 /* For details of action order constraints refer to SF-123102-TC-1§12.6.1 */ 605 enum efx_tc_action_order { 606 EFX_TC_AO_DECAP, 607 EFX_TC_AO_VLAN_POP, 608 EFX_TC_AO_VLAN_PUSH, 609 EFX_TC_AO_COUNT, 610 EFX_TC_AO_ENCAP, 611 EFX_TC_AO_DELIVER 612 }; 613 /* Determine whether we can add @new action without violating order */ 614 static bool efx_tc_flower_action_order_ok(const struct efx_tc_action_set *act, 615 enum efx_tc_action_order new) 616 { 617 switch (new) { 618 case EFX_TC_AO_DECAP: 619 if (act->decap) 620 return false; 621 fallthrough; 622 case EFX_TC_AO_VLAN_POP: 623 if (act->vlan_pop >= 2) 624 return false; 625 /* If we've already pushed a VLAN, we can't then pop it; 626 * the hardware would instead try to pop an existing VLAN 627 * before pushing the new one. 628 */ 629 if (act->vlan_push) 630 return false; 631 fallthrough; 632 case EFX_TC_AO_VLAN_PUSH: 633 if (act->vlan_push >= 2) 634 return false; 635 fallthrough; 636 case EFX_TC_AO_COUNT: 637 if (act->count) 638 return false; 639 fallthrough; 640 case EFX_TC_AO_ENCAP: 641 if (act->encap_md) 642 return false; 643 fallthrough; 644 case EFX_TC_AO_DELIVER: 645 return !act->deliver; 646 default: 647 /* Bad caller. Whatever they wanted to do, say they can't. 
		 */
		WARN_ON_ONCE(1);
		return false;
	}
}

static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
					 struct net_device *net_dev,
					 struct flow_cls_offload *tc)
{
	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule = NULL, *old = NULL;
	struct efx_tc_action_set *act = NULL;
	bool found = false, uplinked = false;
	const struct flow_action_entry *fa;
	struct efx_tc_match match;
	struct efx_rep *to_efv;
	s64 rc;
	int i;

	/* Parse match */
	memset(&match, 0, sizeof(match));
	rc = efx_tc_flower_parse_match(efx, fr, &match, NULL);
	if (rc)
		return rc;
	/* The rule as given to us doesn't specify a source netdevice.
	 * But, determining whether packets from a VF should match it is
	 * complicated, so leave those to the software slowpath: qualify
	 * the filter with source m-port == wire.
	 */
	rc = efx_tc_flower_external_mport(efx, EFX_EFV_PF);
	if (rc < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port for foreign filter");
		return rc;
	}
	match.value.ingress_port = rc;
	match.mask.ingress_port = ~0;

	if (tc->common.chain_index) {
		NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index");
		return -EOPNOTSUPP;
	}
	match.mask.recirc_id = 0xff;

	flow_action_for_each(i, fa, &fr->action) {
		switch (fa->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: /* mirred means mirror here */
			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			if (IS_ERR(to_efv))
				continue;
			found = true;
			break;
		default:
			break;
		}
	}
	if (!found) { /* We don't care. */
		netif_dbg(efx, drv, efx->net_dev,
			  "Ignoring foreign filter that doesn't egdev us\n");
		return -EOPNOTSUPP;
	}

	rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
	if (rc)
		return rc;

	if (efx_tc_match_is_encap(&match.mask)) {
		enum efx_encap_type type;

		type = efx_tc_indr_netdev_type(net_dev);
		if (type == EFX_ENCAP_TYPE_NONE) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Egress encap match on unsupported tunnel device");
			return -EOPNOTSUPP;
		}

		rc = efx_mae_check_encap_type_supported(efx, type);
		if (rc) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Firmware reports no support for %s encap match",
					       efx_tc_encap_type_name(type));
			return rc;
		}

		rc = efx_tc_flower_record_encap_match(efx, &match, type,
						      EFX_TC_EM_DIRECT, 0, 0,
						      extack);
		if (rc)
			return rc;
	} else {
		/* This is not a tunnel decap rule, ignore it */
		netif_dbg(efx, drv, efx->net_dev,
			  "Ignoring foreign filter without encap match\n");
		return -EOPNOTSUPP;
	}

	rule = kzalloc(sizeof(*rule), GFP_USER);
	if (!rule) {
		rc = -ENOMEM;
		goto out_free;
	}
	INIT_LIST_HEAD(&rule->acts.list);
	rule->cookie = tc->cookie;
	old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
						&rule->linkage,
						efx_tc_match_action_ht_params);
	if (old) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Ignoring already-offloaded rule (cookie %lx)\n",
			  tc->cookie);
		rc = -EEXIST;
		goto out_free;
	}

	act = kzalloc(sizeof(*act), GFP_USER);
	if (!act) {
		rc = -ENOMEM;
		goto release;
	}

	/* Parse actions. For foreign rules we only support decap & redirect.
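	 * (plus counters attached via fa->hw_stats).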
	 * See corresponding code in efx_tc_flower_replace() for theory of
	 * operation & how 'act' cursor is used.
	 */
	flow_action_for_each(i, fa, &fr->action) {
		struct efx_tc_action_set save;

		switch (fa->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			/* See corresponding code in efx_tc_flower_replace() for
			 * long explanations of what's going on here.
			 */
			save = *act;
			if (fa->hw_stats) {
				struct efx_tc_counter_index *ctr;

				if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
					NL_SET_ERR_MSG_FMT_MOD(extack,
							       "hw_stats_type %u not supported (only 'delayed')",
							       fa->hw_stats);
					rc = -EOPNOTSUPP;
					goto release;
				}
				if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
					rc = -EOPNOTSUPP;
					goto release;
				}

				ctr = efx_tc_flower_get_counter_index(efx,
								      tc->cookie,
								      EFX_TC_COUNTER_TYPE_AR);
				if (IS_ERR(ctr)) {
					rc = PTR_ERR(ctr);
					NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
					goto release;
				}
				act->count = ctr;
				INIT_LIST_HEAD(&act->count_user);
			}

			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
				/* can't happen */
				rc = -EOPNOTSUPP;
				NL_SET_ERR_MSG_MOD(extack,
						   "Deliver action violates action order (can't happen)");
				goto release;
			}
			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			/* PF implies egdev is us, in which case we really
			 * want to deliver to the uplink (because this is an
			 * ingress filter). If we don't recognise the egdev
			 * at all, then we'd better trap so SW can handle it.
			 */
			if (IS_ERR(to_efv))
				to_efv = EFX_EFV_PF;
			if (to_efv == EFX_EFV_PF) {
				if (uplinked)
					break;
				uplinked = true;
			}
			rc = efx_tc_flower_internal_mport(efx, to_efv);
			if (rc < 0) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
				goto release;
			}
			act->dest_mport = rc;
			act->deliver = 1;
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Failed to write action set to hw (mirred)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL;
			if (fa->id == FLOW_ACTION_REDIRECT)
				break; /* end of the line */
			/* Mirror, so continue on with saved act */
			act = kzalloc(sizeof(*act), GFP_USER);
			if (!act) {
				rc = -ENOMEM;
				goto release;
			}
			*act = save;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DECAP)) {
				rc = -EINVAL;
				NL_SET_ERR_MSG_MOD(extack, "Decap action violates action order");
				goto release;
			}
			act->decap = 1;
			/* If we previously delivered/trapped to uplink, now
			 * that we've decapped we'll want another copy if we
			 * try to deliver/trap to uplink again.
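			 * (The post-decap copy differs from the pre-decap
			 * one, so it isn't a duplicate delivery.)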
			 */
			uplinked = false;
			break;
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
					       fa->id);
			rc = -EOPNOTSUPP;
			goto release;
		}
	}

	if (act) {
		if (!uplinked) {
			/* Not shot/redirected, so deliver to default dest (which is
			 * the uplink, as this is an ingress filter)
			 */
			efx_mae_mport_uplink(efx, &act->dest_mport);
			act->deliver = 1;
		}
		rc = efx_mae_alloc_action_set(efx, act);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
			goto release;
		}
		list_add_tail(&act->list, &rule->acts.list);
		act = NULL; /* Prevent double-free in error path */
	}

	rule->match = match;

	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed foreign filter (cookie %lx)\n",
		  tc->cookie);

	rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
		goto release;
	}
	rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
				 rule->acts.fw_id, &rule->fw_id);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
		goto release_acts;
	}
	return 0;

release_acts:
	efx_mae_free_action_set_list(efx, &rule->acts);
release:
	/* We failed to insert the rule, so free up any entries we created in
	 * subsidiary tables.
	 */
	if (act)
		efx_tc_free_action_set(efx, act, false);
	if (rule) {
		rhashtable_remove_fast(&efx->tc->match_action_ht,
				       &rule->linkage,
				       efx_tc_match_action_ht_params);
		efx_tc_free_action_set_list(efx, &rule->acts, false);
	}
out_free:
	kfree(rule);
	if (match.encap)
		efx_tc_flower_release_encap_match(efx, match.encap);
	return rc;
}

static int efx_tc_flower_replace(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc,
				 struct efx_rep *efv)
{
	struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
	struct netlink_ext_ack *extack = tc->common.extack;
	const struct ip_tunnel_info *encap_info = NULL;
	struct efx_tc_flow_rule *rule = NULL, *old;
	struct efx_tc_action_set *act = NULL;
	const struct flow_action_entry *fa;
	struct efx_rep *from_efv, *to_efv;
	struct efx_tc_match match;
	u32 acts_id;
	s64 rc;
	int i;

	if (!tc_can_offload_extack(efx->net_dev, extack))
		return -EOPNOTSUPP;
	if (WARN_ON(!efx->tc))
		return -ENETDOWN;
	if (WARN_ON(!efx->tc->up))
		return -ENETDOWN;

	from_efv = efx_tc_flower_lookup_efv(efx, net_dev);
	if (IS_ERR(from_efv)) {
		/* Not from our PF or representors, so probably a tunnel dev */
		return efx_tc_flower_replace_foreign(efx, net_dev, tc);
	}

	if (efv != from_efv) {
		/* can't happen */
		NL_SET_ERR_MSG_FMT_MOD(extack, "for %s efv is %snull but from_efv is %snull (can't happen)",
				       netdev_name(net_dev), efv ? "non-" : "",
				       from_efv ? "non-" : "");
"non-" : ""); 968 return -EINVAL; 969 } 970 971 /* Parse match */ 972 memset(&match, 0, sizeof(match)); 973 rc = efx_tc_flower_external_mport(efx, from_efv); 974 if (rc < 0) { 975 NL_SET_ERR_MSG_MOD(extack, "Failed to identify ingress m-port"); 976 return rc; 977 } 978 match.value.ingress_port = rc; 979 match.mask.ingress_port = ~0; 980 rc = efx_tc_flower_parse_match(efx, fr, &match, extack); 981 if (rc) 982 return rc; 983 if (efx_tc_match_is_encap(&match.mask)) { 984 NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported"); 985 return -EOPNOTSUPP; 986 } 987 988 if (tc->common.chain_index) { 989 NL_SET_ERR_MSG_MOD(extack, "No support for nonzero chain_index"); 990 return -EOPNOTSUPP; 991 } 992 match.mask.recirc_id = 0xff; 993 994 rc = efx_mae_match_check_caps(efx, &match.mask, extack); 995 if (rc) 996 return rc; 997 998 rule = kzalloc(sizeof(*rule), GFP_USER); 999 if (!rule) 1000 return -ENOMEM; 1001 INIT_LIST_HEAD(&rule->acts.list); 1002 rule->cookie = tc->cookie; 1003 old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht, 1004 &rule->linkage, 1005 efx_tc_match_action_ht_params); 1006 if (old) { 1007 netif_dbg(efx, drv, efx->net_dev, 1008 "Already offloaded rule (cookie %lx)\n", tc->cookie); 1009 NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded"); 1010 kfree(rule); 1011 return -EEXIST; 1012 } 1013 1014 /* Parse actions */ 1015 act = kzalloc(sizeof(*act), GFP_USER); 1016 if (!act) { 1017 rc = -ENOMEM; 1018 goto release; 1019 } 1020 1021 /** 1022 * DOC: TC action translation 1023 * 1024 * Actions in TC are sequential and cumulative, with delivery actions 1025 * potentially anywhere in the order. The EF100 MAE, however, takes 1026 * an 'action set list' consisting of 'action sets', each of which is 1027 * applied to the _original_ packet, and consists of a set of optional 1028 * actions in a fixed order with delivery at the end. 1029 * To translate between these two models, we maintain a 'cursor', @act, 1030 * which describes the cumulative effect of all the packet-mutating 1031 * actions encountered so far; on handling a delivery (mirred or drop) 1032 * action, once the action-set has been inserted into hardware, we 1033 * append @act to the action-set list (@rule->acts); if this is a pipe 1034 * action (mirred mirror) we then allocate a new @act with a copy of 1035 * the cursor state _before_ the delivery action, otherwise we set @act 1036 * to %NULL. 1037 * This ensures that every allocated action-set is either attached to 1038 * @rule->acts or pointed to by @act (and never both), and that only 1039 * those action-sets in @rule->acts exist in hardware. Consequently, 1040 * in the failure path, @act only needs to be freed in memory, whereas 1041 * for @rule->acts we remove each action-set from hardware before 1042 * freeing it (efx_tc_free_action_set_list()), even if the action-set 1043 * list itself is not in hardware. 1044 */ 1045 flow_action_for_each(i, fa, &fr->action) { 1046 struct efx_tc_action_set save; 1047 u16 tci; 1048 1049 if (!act) { 1050 /* more actions after a non-pipe action */ 1051 NL_SET_ERR_MSG_MOD(extack, "Action follows non-pipe action"); 1052 rc = -EINVAL; 1053 goto release; 1054 } 1055 1056 if ((fa->id == FLOW_ACTION_REDIRECT || 1057 fa->id == FLOW_ACTION_MIRRED || 1058 fa->id == FLOW_ACTION_DROP) && fa->hw_stats) { 1059 struct efx_tc_counter_index *ctr; 1060 1061 /* Currently the only actions that want stats are 1062 * mirred and gact (ok, shot, trap, goto-chain), which 1063 * means we want stats just before delivery. 
			 * Also, note that tunnel_key set shouldn't change the
			 * length — it's only the subsequent mirred that does
			 * that, and the stats are taken _before_ the mirred
			 * action happens.
			 */
			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
				/* All supported actions that count either steal
				 * (gact shot, mirred redirect) or clone act
				 * (mirred mirror), so we should never get two
				 * count actions on one action_set.
				 */
				NL_SET_ERR_MSG_MOD(extack, "Count-action conflict (can't happen)");
				rc = -EOPNOTSUPP;
				goto release;
			}

			if (!(fa->hw_stats & FLOW_ACTION_HW_STATS_DELAYED)) {
				NL_SET_ERR_MSG_FMT_MOD(extack, "hw_stats_type %u not supported (only 'delayed')",
						       fa->hw_stats);
				rc = -EOPNOTSUPP;
				goto release;
			}

			ctr = efx_tc_flower_get_counter_index(efx, tc->cookie,
							      EFX_TC_COUNTER_TYPE_AR);
			if (IS_ERR(ctr)) {
				rc = PTR_ERR(ctr);
				NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
				goto release;
			}
			act->count = ctr;
			INIT_LIST_HEAD(&act->count_user);
		}

		switch (fa->id) {
		case FLOW_ACTION_DROP:
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (drop)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL; /* end of the line */
			break;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			save = *act;

			if (encap_info) {
				struct efx_tc_encap_action *encap;

				if (!efx_tc_flower_action_order_ok(act,
								   EFX_TC_AO_ENCAP)) {
					rc = -EOPNOTSUPP;
					NL_SET_ERR_MSG_MOD(extack, "Encap action violates action order");
					goto release;
				}
				encap = efx_tc_flower_create_encap_md(
						efx, encap_info, fa->dev, extack);
				if (IS_ERR_OR_NULL(encap)) {
					rc = PTR_ERR(encap);
					if (!rc)
						rc = -EIO; /* arbitrary */
					goto release;
				}
				act->encap_md = encap;
				list_add_tail(&act->encap_user, &encap->users);
				act->dest_mport = encap->dest_mport;
				act->deliver = 1;
				if (act->count && !WARN_ON(!act->count->cnt)) {
					/* This counter is used by an encap
					 * action, which needs a reference back
					 * so it can prod neighbouring whenever
					 * traffic is seen.
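					 * (That is, the neighbour entry used
					 * for the encap headers is kept fresh
					 * while traffic flows.)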
					 */
					spin_lock_bh(&act->count->cnt->lock);
					list_add_tail(&act->count_user,
						      &act->count->cnt->users);
					spin_unlock_bh(&act->count->cnt->lock);
				}
				rc = efx_mae_alloc_action_set(efx, act);
				if (rc) {
					NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (encap)");
					goto release;
				}
				list_add_tail(&act->list, &rule->acts.list);
				act->user = &rule->acts;
				act = NULL;
				if (fa->id == FLOW_ACTION_REDIRECT)
					break; /* end of the line */
				/* Mirror, so continue on with saved act */
				save.count = NULL;
				act = kzalloc(sizeof(*act), GFP_USER);
				if (!act) {
					rc = -ENOMEM;
					goto release;
				}
				*act = save;
				break;
			}

			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_DELIVER)) {
				/* can't happen */
				rc = -EOPNOTSUPP;
				NL_SET_ERR_MSG_MOD(extack, "Deliver action violates action order (can't happen)");
				goto release;
			}

			to_efv = efx_tc_flower_lookup_efv(efx, fa->dev);
			if (IS_ERR(to_efv)) {
				NL_SET_ERR_MSG_MOD(extack, "Mirred egress device not on switch");
				rc = PTR_ERR(to_efv);
				goto release;
			}
			rc = efx_tc_flower_external_mport(efx, to_efv);
			if (rc < 0) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to identify egress m-port");
				goto release;
			}
			act->dest_mport = rc;
			act->deliver = 1;
			rc = efx_mae_alloc_action_set(efx, act);
			if (rc) {
				NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (mirred)");
				goto release;
			}
			list_add_tail(&act->list, &rule->acts.list);
			act = NULL;
			if (fa->id == FLOW_ACTION_REDIRECT)
				break; /* end of the line */
			/* Mirror, so continue on with saved act */
			save.count = NULL;
			act = kzalloc(sizeof(*act), GFP_USER);
			if (!act) {
				rc = -ENOMEM;
				goto release;
			}
			*act = save;
			break;
		case FLOW_ACTION_VLAN_POP:
			if (act->vlan_push) {
				act->vlan_push--;
			} else if (efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_POP)) {
				act->vlan_pop++;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "More than two VLAN pops, or action order violated");
				rc = -EINVAL;
				goto release;
			}
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_VLAN_PUSH)) {
				rc = -EINVAL;
				NL_SET_ERR_MSG_MOD(extack,
						   "More than two VLAN pushes, or action order violated");
				goto release;
			}
			tci = fa->vlan.vid & VLAN_VID_MASK;
			tci |= fa->vlan.prio << VLAN_PRIO_SHIFT;
			act->vlan_tci[act->vlan_push] = cpu_to_be16(tci);
			act->vlan_proto[act->vlan_push] = fa->vlan.proto;
			act->vlan_push++;
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			if (encap_info) {
				/* Can't specify encap multiple times.
				 * If you want to overwrite an existing
				 * encap_info, use an intervening
				 * FLOW_ACTION_TUNNEL_DECAP to clear it.
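				 * (A tc tunnel_key 'unset' action arrives
				 * here as just such a decap.)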
				 */
				NL_SET_ERR_MSG_MOD(extack, "Tunnel key set when already set");
				rc = -EINVAL;
				goto release;
			}
			if (!fa->tunnel) {
				NL_SET_ERR_MSG_MOD(extack, "Tunnel key set is missing key");
				rc = -EOPNOTSUPP;
				goto release;
			}
			encap_info = fa->tunnel;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			if (encap_info) {
				encap_info = NULL;
				break;
			}
			/* Since we don't support enc_key matches on ingress
			 * (and if we did there'd be no tunnel-device to give
			 * us a type), we can't offload a decap that's not
			 * just undoing a previous encap action.
			 */
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device");
			rc = -EOPNOTSUPP;
			goto release;
		default:
			NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
					       fa->id);
			rc = -EOPNOTSUPP;
			goto release;
		}
	}

	if (act) {
		/* Not shot/redirected, so deliver to default dest */
		if (from_efv == EFX_EFV_PF)
			/* Rule applies to traffic from the wire,
			 * and default dest is thus the PF
			 */
			efx_mae_mport_uplink(efx, &act->dest_mport);
		else
			/* Representor, so rule applies to traffic from
			 * representee, and default dest is thus the rep.
			 * All reps use the same mport for delivery
			 */
			efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
					    &act->dest_mport);
		act->deliver = 1;
		rc = efx_mae_alloc_action_set(efx, act);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to write action set to hw (deliver)");
			goto release;
		}
		list_add_tail(&act->list, &rule->acts.list);
		act = NULL; /* Prevent double-free in error path */
	}

	netif_dbg(efx, drv, efx->net_dev,
		  "Successfully parsed filter (cookie %lx)\n",
		  tc->cookie);

	rule->match = match;

	rc = efx_mae_alloc_action_set_list(efx, &rule->acts);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to write action set list to hw");
		goto release;
	}
	if (from_efv == EFX_EFV_PF)
		/* PF netdev, so rule applies to traffic from wire */
		rule->fallback = &efx->tc->facts.pf;
	else
		/* repdev, so rule applies to traffic from representee */
		rule->fallback = &efx->tc->facts.reps;
	if (!efx_tc_check_ready(efx, rule)) {
		netif_dbg(efx, drv, efx->net_dev, "action not ready for hw\n");
		acts_id = rule->fallback->fw_id;
	} else {
		netif_dbg(efx, drv, efx->net_dev, "ready for hw\n");
		acts_id = rule->acts.fw_id;
	}
	rc = efx_mae_insert_rule(efx, &rule->match, EFX_TC_PRIO_TC,
				 acts_id, &rule->fw_id);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
		goto release_acts;
	}
	return 0;

release_acts:
	efx_mae_free_action_set_list(efx, &rule->acts);
release:
	/* We failed to insert the rule, so free up any entries we created in
	 * subsidiary tables.
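	 * (The cursor @act, any action-sets already on @rule->acts, and our
	 * match_action_ht entry; see the DOC comment above.)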
	 */
	if (act)
		efx_tc_free_action_set(efx, act, false);
	if (rule) {
		rhashtable_remove_fast(&efx->tc->match_action_ht,
				       &rule->linkage,
				       efx_tc_match_action_ht_params);
		efx_tc_free_action_set_list(efx, &rule->acts, false);
	}
	kfree(rule);
	return rc;
}

static int efx_tc_flower_destroy(struct efx_nic *efx,
				 struct net_device *net_dev,
				 struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_flow_rule *rule;

	rule = rhashtable_lookup_fast(&efx->tc->match_action_ht, &tc->cookie,
				      efx_tc_match_action_ht_params);
	if (!rule) {
		/* Only log a message if we're the ingress device. Otherwise
		 * it's a foreign filter and we might just not have been
		 * interested (e.g. we might not have been the egress device
		 * either).
		 */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			netif_warn(efx, drv, efx->net_dev,
				   "Filter %lx not found to remove\n", tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}

	/* Remove it from HW */
	efx_tc_delete_rule(efx, rule);
	/* Delete it from SW */
	rhashtable_remove_fast(&efx->tc->match_action_ht, &rule->linkage,
			       efx_tc_match_action_ht_params);
	netif_dbg(efx, drv, efx->net_dev, "Removed filter %lx\n", rule->cookie);
	kfree(rule);
	return 0;
}

static int efx_tc_flower_stats(struct efx_nic *efx, struct net_device *net_dev,
			       struct flow_cls_offload *tc)
{
	struct netlink_ext_ack *extack = tc->common.extack;
	struct efx_tc_counter_index *ctr;
	struct efx_tc_counter *cnt;
	u64 packets, bytes;

	ctr = efx_tc_flower_find_counter_index(efx, tc->cookie);
	if (!ctr) {
		/* See comment in efx_tc_flower_destroy() */
		if (!IS_ERR(efx_tc_flower_lookup_efv(efx, net_dev)))
			if (net_ratelimit())
				netif_warn(efx, drv, efx->net_dev,
					   "Filter %lx not found for stats\n",
					   tc->cookie);
		NL_SET_ERR_MSG_MOD(extack, "Flow cookie not found in offloaded rules");
		return -ENOENT;
	}
	if (WARN_ON(!ctr->cnt)) /* can't happen */
		return -EIO;
	cnt = ctr->cnt;

	spin_lock_bh(&cnt->lock);
	/* Report only new pkts/bytes since last time TC asked */
	packets = cnt->packets;
	bytes = cnt->bytes;
	flow_stats_update(&tc->stats, bytes - cnt->old_bytes,
			  packets - cnt->old_packets, 0, cnt->touched,
			  FLOW_ACTION_HW_STATS_DELAYED);
	cnt->old_packets = packets;
	cnt->old_bytes = bytes;
	spin_unlock_bh(&cnt->lock);
	return 0;
}

int efx_tc_flower(struct efx_nic *efx, struct net_device *net_dev,
		  struct flow_cls_offload *tc, struct efx_rep *efv)
{
	int rc;

	if (!efx->tc)
		return -EOPNOTSUPP;

	mutex_lock(&efx->tc->mutex);
	switch (tc->command) {
	case FLOW_CLS_REPLACE:
		rc = efx_tc_flower_replace(efx, net_dev, tc, efv);
		break;
	case FLOW_CLS_DESTROY:
		rc = efx_tc_flower_destroy(efx, net_dev, tc);
		break;
	case FLOW_CLS_STATS:
		rc = efx_tc_flower_stats(efx, net_dev, tc);
		break;
	default:
		rc = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&efx->tc->mutex);
	return rc;
}

static int efx_tc_configure_default_rule(struct efx_nic *efx, u32 ing_port,
					 u32 eg_port, struct efx_tc_flow_rule *rule)
{
	struct efx_tc_action_set_list *acts = &rule->acts;
	struct efx_tc_match *match = &rule->match;
	struct efx_tc_action_set *act;
	int rc;

	match->value.ingress_port = ing_port;
	match->mask.ingress_port = ~0;
	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	rc = efx_mae_insert_rule(efx, match, EFX_TC_PRIO_DFLT,
				 acts->fw_id, &rule->fw_id);
	if (rc)
		goto fail3;
	return 0;
fail3:
	efx_mae_free_action_set_list(efx, acts);
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}

static int efx_tc_configure_default_rule_pf(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.pf;
	u32 ing_port, eg_port;

	efx_mae_mport_uplink(efx, &ing_port);
	efx_mae_mport_wire(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

static int efx_tc_configure_default_rule_wire(struct efx_nic *efx)
{
	struct efx_tc_flow_rule *rule = &efx->tc->dflt.wire;
	u32 ing_port, eg_port;

	efx_mae_mport_wire(efx, &ing_port);
	efx_mae_mport_uplink(efx, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

int efx_tc_configure_default_rule_rep(struct efx_rep *efv)
{
	struct efx_tc_flow_rule *rule = &efv->dflt;
	struct efx_nic *efx = efv->parent;
	u32 ing_port, eg_port;

	efx_mae_mport_mport(efx, efv->mport, &ing_port);
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
	return efx_tc_configure_default_rule(efx, ing_port, eg_port, rule);
}

void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
				     struct efx_tc_flow_rule *rule)
{
	if (rule->fw_id != MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL)
		efx_tc_delete_rule(efx, rule);
	rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
}

static int efx_tc_configure_fallback_acts(struct efx_nic *efx, u32 eg_port,
					  struct efx_tc_action_set_list *acts)
{
	struct efx_tc_action_set *act;
	int rc;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (!act)
		return -ENOMEM;
	act->deliver = 1;
	act->dest_mport = eg_port;
	rc = efx_mae_alloc_action_set(efx, act);
	if (rc)
		goto fail1;
	EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
	list_add_tail(&act->list, &acts->list);
	rc = efx_mae_alloc_action_set_list(efx, acts);
	if (rc)
		goto fail2;
	return 0;
fail2:
	list_del(&act->list);
	efx_mae_free_action_set(efx, act->fw_id);
fail1:
	kfree(act);
	return rc;
}

static int efx_tc_configure_fallback_acts_pf(struct efx_nic *efx)
{
	struct efx_tc_action_set_list *acts = &efx->tc->facts.pf;
	u32 eg_port;

	efx_mae_mport_uplink(efx, &eg_port);
	return efx_tc_configure_fallback_acts(efx, eg_port, acts);
}

static int efx_tc_configure_fallback_acts_reps(struct efx_nic *efx)
{
	struct efx_tc_action_set_list *acts = &efx->tc->facts.reps;
	u32 eg_port;

	efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
	return efx_tc_configure_fallback_acts(efx, eg_port, acts);
}

static void efx_tc_deconfigure_fallback_acts(struct efx_nic *efx,
					     struct efx_tc_action_set_list *acts)
{
	efx_tc_free_action_set_list(efx, acts, true);
}

static int efx_tc_configure_rep_mport(struct efx_nic *efx)
{
	u32 rep_mport_label;
	int rc;

	rc = efx_mae_allocate_mport(efx, &efx->tc->reps_mport_id, &rep_mport_label);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "created rep mport 0x%08x (0x%04x)\n",
		efx->tc->reps_mport_id, rep_mport_label);
	/* Use mport *selector* as vport ID */
	efx_mae_mport_mport(efx, efx->tc->reps_mport_id,
			    &efx->tc->reps_mport_vport_id);
	return 0;
}

static void efx_tc_deconfigure_rep_mport(struct efx_nic *efx)
{
	efx_mae_free_mport(efx, efx->tc->reps_mport_id);
	efx->tc->reps_mport_id = MAE_MPORT_SELECTOR_NULL;
}

int efx_tc_insert_rep_filters(struct efx_nic *efx)
{
	struct efx_filter_spec promisc, allmulti;
	int rc;

	if (efx->type->is_vf)
		return 0;
	if (!efx->tc)
		return 0;
	efx_filter_init_rx(&promisc, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_uc_def(&promisc);
	efx_filter_set_vport_id(&promisc, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &promisc, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_uc = rc;
	efx_filter_init_rx(&allmulti, EFX_FILTER_PRI_REQUIRED, 0, 0);
	efx_filter_set_mc_def(&allmulti);
	efx_filter_set_vport_id(&allmulti, efx->tc->reps_mport_vport_id);
	rc = efx_filter_insert_filter(efx, &allmulti, false);
	if (rc < 0)
		return rc;
	efx->tc->reps_filter_mc = rc;
	return 0;
}

void efx_tc_remove_rep_filters(struct efx_nic *efx)
{
	if (efx->type->is_vf)
		return;
	if (!efx->tc)
		return;
	if (efx->tc->reps_filter_mc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_mc);
	efx->tc->reps_filter_mc = -1;
	if (efx->tc->reps_filter_uc >= 0)
		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, efx->tc->reps_filter_uc);
	efx->tc->reps_filter_uc = -1;
}

int efx_init_tc(struct efx_nic *efx)
{
	int rc;

	rc = efx_mae_get_caps(efx, efx->tc->caps);
	if (rc)
		return rc;
	if (efx->tc->caps->match_field_count > MAE_NUM_FIELDS)
		/* Firmware supports some match fields the driver doesn't know
		 * about. Not fatal, unless any of those fields are required
		 * (MAE_FIELD_SUPPORTED_MATCH_ALWAYS) but if so we don't know.
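		 * (If one of the unknown fields really is MATCH_ALWAYS,
		 * rule insertion will presumably fail later on.)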
		 */
		netif_warn(efx, probe, efx->net_dev,
			   "FW reports additional match fields %u\n",
			   efx->tc->caps->match_field_count);
	if (efx->tc->caps->action_prios < EFX_TC_PRIO__NUM) {
		netif_err(efx, probe, efx->net_dev,
			  "Too few action prios supported (have %u, need %u)\n",
			  efx->tc->caps->action_prios, EFX_TC_PRIO__NUM);
		return -EIO;
	}
	rc = efx_tc_configure_default_rule_pf(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_default_rule_wire(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_rep_mport(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_fallback_acts_pf(efx);
	if (rc)
		return rc;
	rc = efx_tc_configure_fallback_acts_reps(efx);
	if (rc)
		return rc;
	efx->tc->up = true;
	rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
	if (rc)
		return rc;
	return 0;
}

void efx_fini_tc(struct efx_nic *efx)
{
	/* We can get called even if efx_init_struct_tc() failed */
	if (!efx->tc)
		return;
	if (efx->tc->up)
		flow_indr_dev_unregister(efx_tc_indr_setup_cb, efx, efx_tc_block_unbind);
	efx_tc_deconfigure_rep_mport(efx);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
	efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
	efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
	efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
	efx->tc->up = false;
}

/* At teardown time, all TC filter rules (and thus all resources they created)
 * should already have been removed. If we find any in our hashtables, make a
 * cursory attempt to clean up the software side.
 */
static void efx_tc_encap_match_free(void *ptr, void *__unused)
{
	struct efx_tc_encap_match *encap = ptr;

	WARN_ON(refcount_read(&encap->ref));
	kfree(encap);
}

static void efx_tc_flow_free(void *ptr, void *arg)
{
	struct efx_tc_flow_rule *rule = ptr;
	struct efx_nic *efx = arg;

	netif_err(efx, drv, efx->net_dev,
		  "tc rule %lx still present at teardown, removing\n",
		  rule->cookie);

	/* Also releases entries in subsidiary tables */
	efx_tc_delete_rule(efx, rule);

	kfree(rule);
}

int efx_init_struct_tc(struct efx_nic *efx)
{
	int rc;

	if (efx->type->is_vf)
		return 0;

	efx->tc = kzalloc(sizeof(*efx->tc), GFP_KERNEL);
	if (!efx->tc)
		return -ENOMEM;
	efx->tc->caps = kzalloc(sizeof(struct mae_caps), GFP_KERNEL);
	if (!efx->tc->caps) {
		rc = -ENOMEM;
		goto fail_alloc_caps;
	}
	INIT_LIST_HEAD(&efx->tc->block_list);

	mutex_init(&efx->tc->mutex);
	init_waitqueue_head(&efx->tc->flush_wq);
	rc = efx_tc_init_encap_actions(efx);
	if (rc < 0)
		goto fail_encap_actions;
	rc = efx_tc_init_counters(efx);
	if (rc < 0)
		goto fail_counters;
	rc = rhashtable_init(&efx->tc->encap_match_ht, &efx_tc_encap_match_ht_params);
	if (rc < 0)
		goto fail_encap_match_ht;
	rc = rhashtable_init(&efx->tc->match_action_ht, &efx_tc_match_action_ht_params);
	if (rc < 0)
		goto fail_match_action_ht;
	efx->tc->reps_filter_uc = -1;
	efx->tc->reps_filter_mc = -1;
	INIT_LIST_HEAD(&efx->tc->dflt.pf.acts.list);
	efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
	efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->facts.pf.list);
	efx->tc->facts.pf.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
	INIT_LIST_HEAD(&efx->tc->facts.reps.list);
	efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
	efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
	return 0;
fail_match_action_ht:
	rhashtable_destroy(&efx->tc->encap_match_ht);
fail_encap_match_ht:
	efx_tc_destroy_counters(efx);
fail_counters:
	efx_tc_destroy_encap_actions(efx);
fail_encap_actions:
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
fail_alloc_caps:
	kfree(efx->tc);
	efx->tc = NULL;
	return rc;
}

void efx_fini_struct_tc(struct efx_nic *efx)
{
	if (!efx->tc)
		return;

	mutex_lock(&efx->tc->mutex);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.pf.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
			     MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->facts.pf.fw_id !=
			     MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
	EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
			     MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
	rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
				    efx);
	rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
				    efx_tc_encap_match_free, NULL);
	efx_tc_fini_counters(efx);
	efx_tc_fini_encap_actions(efx);
	mutex_unlock(&efx->tc->mutex);
	mutex_destroy(&efx->tc->mutex);
	kfree(efx->tc->caps);
	kfree(efx->tc);
	efx->tc = NULL;
}