#include "mcdi_filters.h"
#include "mcdi.h"
#include "nic.h"
#include "rx_common.h"

/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* Marker for a software table slot that holds no hardware filter */
#define EFX_EF10_FILTER_ID_INVALID 0xffff

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

/*
 * Recover the filter spec pointer packed into a software table entry.
 * The low bits of entry[].spec double as private flag storage
 * (EFX_EF10_FILTER_FLAGS), so they must be masked off before the value
 * is used as a pointer.
 */
static struct efx_filter_spec *
efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table,
			   unsigned int filter_idx)
{
	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
					  ~EFX_EF10_FILTER_FLAGS);
}

/* Extract only the private flag bits packed into a software table entry */
static unsigned int
efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table,
			    unsigned int filter_idx)
{
	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
}

/*
 * Convert a user-visible filter ID to a table row index.  "Unsafe"
 * because the ID is only masked down to the row index; the priority
 * part is not validated here.
 */
static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1);
}

/* Extract the match-priority band encoded in a user-visible filter ID */
static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2);
}

/*
 * Build a user-visible filter ID from a match priority and a table
 * index (inverse of the two helpers above).
 */
static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx;
}

/*
 * Decide whether a filter should be exclusive or else should allow
 * delivery to additional recipients.  Currently we decide that
 * filters for specific local unicast MAC and IP addresses are
 * exclusive.
51 */ 52 static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec) 53 { 54 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 55 !is_multicast_ether_addr(spec->loc_mac)) 56 return true; 57 58 if ((spec->match_flags & 59 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 60 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 61 if (spec->ether_type == htons(ETH_P_IP) && 62 !ipv4_is_multicast(spec->loc_host[0])) 63 return true; 64 if (spec->ether_type == htons(ETH_P_IPV6) && 65 ((const u8 *)spec->loc_host)[0] != 0xff) 66 return true; 67 } 68 69 return false; 70 } 71 72 static void 73 efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table, 74 unsigned int filter_idx, 75 const struct efx_filter_spec *spec, 76 unsigned int flags) 77 { 78 table->entry[filter_idx].spec = (unsigned long)spec | flags; 79 } 80 81 static void 82 efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, 83 const struct efx_filter_spec *spec, 84 efx_dword_t *inbuf) 85 { 86 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 87 u32 match_fields = 0, uc_match, mc_match; 88 89 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 90 efx_mcdi_filter_is_exclusive(spec) ? 91 MC_CMD_FILTER_OP_IN_OP_INSERT : 92 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 93 94 /* 95 * Convert match flags and values. Unlike almost 96 * everything else in MCDI, these fields are in 97 * network byte order. 98 */ 99 #define COPY_VALUE(value, mcdi_field) \ 100 do { \ 101 match_fields |= \ 102 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 103 mcdi_field ## _LBN; \ 104 BUILD_BUG_ON( \ 105 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ 106 sizeof(value)); \ 107 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ 108 &value, sizeof(value)); \ 109 } while (0) 110 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ 111 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ 112 COPY_VALUE(spec->gen_field, mcdi_field); \ 113 } 114 /* 115 * Handle encap filters first. 
They will always be mismatch 116 * (unknown UC or MC) filters 117 */ 118 if (encap_type) { 119 /* 120 * ether_type and outer_ip_proto need to be variables 121 * because COPY_VALUE wants to memcpy them 122 */ 123 __be16 ether_type = 124 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 125 ETH_P_IPV6 : ETH_P_IP); 126 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; 127 u8 outer_ip_proto; 128 129 switch (encap_type & EFX_ENCAP_TYPES_MASK) { 130 case EFX_ENCAP_TYPE_VXLAN: 131 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; 132 /* fallthrough */ 133 case EFX_ENCAP_TYPE_GENEVE: 134 COPY_VALUE(ether_type, ETHER_TYPE); 135 outer_ip_proto = IPPROTO_UDP; 136 COPY_VALUE(outer_ip_proto, IP_PROTO); 137 /* 138 * We always need to set the type field, even 139 * though we're not matching on the TNI. 140 */ 141 MCDI_POPULATE_DWORD_1(inbuf, 142 FILTER_OP_EXT_IN_VNI_OR_VSID, 143 FILTER_OP_EXT_IN_VNI_TYPE, 144 vni_type); 145 break; 146 case EFX_ENCAP_TYPE_NVGRE: 147 COPY_VALUE(ether_type, ETHER_TYPE); 148 outer_ip_proto = IPPROTO_GRE; 149 COPY_VALUE(outer_ip_proto, IP_PROTO); 150 break; 151 default: 152 WARN_ON(1); 153 } 154 155 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 156 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 157 } else { 158 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 159 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 160 } 161 162 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 163 match_fields |= 164 is_multicast_ether_addr(spec->loc_mac) ? 
165 1 << mc_match : 166 1 << uc_match; 167 COPY_FIELD(REM_HOST, rem_host, SRC_IP); 168 COPY_FIELD(LOC_HOST, loc_host, DST_IP); 169 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); 170 COPY_FIELD(REM_PORT, rem_port, SRC_PORT); 171 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); 172 COPY_FIELD(LOC_PORT, loc_port, DST_PORT); 173 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); 174 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); 175 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); 176 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); 177 #undef COPY_FIELD 178 #undef COPY_VALUE 179 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, 180 match_fields); 181 } 182 183 static void efx_mcdi_filter_push_prep(struct efx_nic *efx, 184 const struct efx_filter_spec *spec, 185 efx_dword_t *inbuf, u64 handle, 186 struct efx_rss_context *ctx, 187 bool replacing) 188 { 189 u32 flags = spec->flags; 190 191 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); 192 193 /* If RSS filter, caller better have given us an RSS context */ 194 if (flags & EFX_FILTER_FLAG_RX_RSS) { 195 /* 196 * We don't have the ability to return an error, so we'll just 197 * log a warning and disable RSS for the filter. 198 */ 199 if (WARN_ON_ONCE(!ctx)) 200 flags &= ~EFX_FILTER_FLAG_RX_RSS; 201 else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)) 202 flags &= ~EFX_FILTER_FLAG_RX_RSS; 203 } 204 205 if (replacing) { 206 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 207 MC_CMD_FILTER_OP_IN_OP_REPLACE); 208 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 209 } else { 210 efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf); 211 } 212 213 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, efx->vport_id); 214 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, 215 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 
216 MC_CMD_FILTER_OP_IN_RX_DEST_DROP : 217 MC_CMD_FILTER_OP_IN_RX_DEST_HOST); 218 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); 219 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, 220 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); 221 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, 222 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 223 0 : spec->dmaq_id); 224 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 225 (flags & EFX_FILTER_FLAG_RX_RSS) ? 226 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 227 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 228 if (flags & EFX_FILTER_FLAG_RX_RSS) 229 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id); 230 } 231 232 static int efx_mcdi_filter_push(struct efx_nic *efx, 233 const struct efx_filter_spec *spec, u64 *handle, 234 struct efx_rss_context *ctx, bool replacing) 235 { 236 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 237 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); 238 size_t outlen; 239 int rc; 240 241 efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); 242 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 243 outbuf, sizeof(outbuf), &outlen); 244 if (rc && spec->priority != EFX_FILTER_PRI_HINT) 245 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf), 246 outbuf, outlen, rc); 247 if (rc == 0) 248 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 249 if (rc == -ENOSPC) 250 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 251 return rc; 252 } 253 254 static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) 255 { 256 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 257 unsigned int match_flags = spec->match_flags; 258 unsigned int uc_match, mc_match; 259 u32 mcdi_flags = 0; 260 261 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ 262 unsigned int old_match_flags = match_flags; \ 263 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ 264 if (match_flags != old_match_flags) \ 265 mcdi_flags |= \ 266 (1 << ((encap) ? 
\ 267 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ 268 mcdi_field ## _LBN : \ 269 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ 270 mcdi_field ## _LBN)); \ 271 } 272 /* inner or outer based on encap type */ 273 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); 274 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); 275 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); 276 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); 277 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); 278 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); 279 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); 280 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); 281 /* always outer */ 282 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); 283 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); 284 #undef MAP_FILTER_TO_MCDI_FLAG 285 286 /* special handling for encap type, and mismatch */ 287 if (encap_type) { 288 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; 289 mcdi_flags |= 290 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 291 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 292 293 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 294 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 295 } else { 296 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 297 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 298 } 299 300 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { 301 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; 302 mcdi_flags |= 303 is_multicast_ether_addr(spec->loc_mac) ? 304 1 << mc_match : 305 1 << uc_match; 306 } 307 308 /* Did we map them all? 
*/ 309 WARN_ON_ONCE(match_flags); 310 311 return mcdi_flags; 312 } 313 314 static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table, 315 const struct efx_filter_spec *spec) 316 { 317 u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec); 318 unsigned int match_pri; 319 320 for (match_pri = 0; 321 match_pri < table->rx_match_count; 322 match_pri++) 323 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) 324 return match_pri; 325 326 return -EPROTONOSUPPORT; 327 } 328 329 static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, 330 struct efx_filter_spec *spec, 331 bool replace_equal) 332 { 333 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 334 struct efx_mcdi_filter_table *table; 335 struct efx_filter_spec *saved_spec; 336 struct efx_rss_context *ctx = NULL; 337 unsigned int match_pri, hash; 338 unsigned int priv_flags; 339 bool rss_locked = false; 340 bool replacing = false; 341 unsigned int depth, i; 342 int ins_index = -1; 343 DEFINE_WAIT(wait); 344 bool is_mc_recip; 345 s32 rc; 346 347 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 348 table = efx->filter_state; 349 down_write(&table->lock); 350 351 /* For now, only support RX filters */ 352 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 353 EFX_FILTER_FLAG_RX) { 354 rc = -EINVAL; 355 goto out_unlock; 356 } 357 358 rc = efx_mcdi_filter_pri(table, spec); 359 if (rc < 0) 360 goto out_unlock; 361 match_pri = rc; 362 363 hash = efx_filter_spec_hash(spec); 364 is_mc_recip = efx_filter_is_mc_recipient(spec); 365 if (is_mc_recip) 366 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 367 368 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { 369 mutex_lock(&efx->rss_lock); 370 rss_locked = true; 371 if (spec->rss_context) 372 ctx = efx_find_rss_context_entry(efx, spec->rss_context); 373 else 374 ctx = &efx->rss_context; 375 if (!ctx) { 376 rc = -ENOENT; 377 goto out_unlock; 378 } 379 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { 380 rc = -EOPNOTSUPP; 381 goto 
out_unlock; 382 } 383 } 384 385 /* Find any existing filters with the same match tuple or 386 * else a free slot to insert at. 387 */ 388 for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 389 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); 390 saved_spec = efx_mcdi_filter_entry_spec(table, i); 391 392 if (!saved_spec) { 393 if (ins_index < 0) 394 ins_index = i; 395 } else if (efx_filter_spec_equal(spec, saved_spec)) { 396 if (spec->priority < saved_spec->priority && 397 spec->priority != EFX_FILTER_PRI_AUTO) { 398 rc = -EPERM; 399 goto out_unlock; 400 } 401 if (!is_mc_recip) { 402 /* This is the only one */ 403 if (spec->priority == 404 saved_spec->priority && 405 !replace_equal) { 406 rc = -EEXIST; 407 goto out_unlock; 408 } 409 ins_index = i; 410 break; 411 } else if (spec->priority > 412 saved_spec->priority || 413 (spec->priority == 414 saved_spec->priority && 415 replace_equal)) { 416 if (ins_index < 0) 417 ins_index = i; 418 else 419 __set_bit(depth, mc_rem_map); 420 } 421 } 422 } 423 424 /* Once we reach the maximum search depth, use the first suitable 425 * slot, or return -EBUSY if there was none 426 */ 427 if (ins_index < 0) { 428 rc = -EBUSY; 429 goto out_unlock; 430 } 431 432 /* Create a software table entry if necessary. 
*/ 433 saved_spec = efx_mcdi_filter_entry_spec(table, ins_index); 434 if (saved_spec) { 435 if (spec->priority == EFX_FILTER_PRI_AUTO && 436 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 437 /* Just make sure it won't be removed */ 438 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 439 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 440 table->entry[ins_index].spec &= 441 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 442 rc = ins_index; 443 goto out_unlock; 444 } 445 replacing = true; 446 priv_flags = efx_mcdi_filter_entry_flags(table, ins_index); 447 } else { 448 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 449 if (!saved_spec) { 450 rc = -ENOMEM; 451 goto out_unlock; 452 } 453 *saved_spec = *spec; 454 priv_flags = 0; 455 } 456 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags); 457 458 /* Actually insert the filter on the HW */ 459 rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle, 460 ctx, replacing); 461 462 if (rc == -EINVAL && efx->must_realloc_vis) 463 /* The MC rebooted under us, causing it to reject our filter 464 * insertion as pointing to an invalid VI (spec->dmaq_id). 465 */ 466 rc = -EAGAIN; 467 468 /* Finalise the software table entry */ 469 if (rc == 0) { 470 if (replacing) { 471 /* Update the fields that may differ */ 472 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 473 saved_spec->flags |= 474 EFX_FILTER_FLAG_RX_OVER_AUTO; 475 saved_spec->priority = spec->priority; 476 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 477 saved_spec->flags |= spec->flags; 478 saved_spec->rss_context = spec->rss_context; 479 saved_spec->dmaq_id = spec->dmaq_id; 480 } 481 } else if (!replacing) { 482 kfree(saved_spec); 483 saved_spec = NULL; 484 } else { 485 /* We failed to replace, so the old filter is still present. 486 * Roll back the software table to reflect this. In fact the 487 * efx_mcdi_filter_set_entry() call below will do the right 488 * thing, so nothing extra is needed here. 
489 */ 490 } 491 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags); 492 493 /* Remove and finalise entries for lower-priority multicast 494 * recipients 495 */ 496 if (is_mc_recip) { 497 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 498 unsigned int depth, i; 499 500 memset(inbuf, 0, sizeof(inbuf)); 501 502 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 503 if (!test_bit(depth, mc_rem_map)) 504 continue; 505 506 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); 507 saved_spec = efx_mcdi_filter_entry_spec(table, i); 508 priv_flags = efx_mcdi_filter_entry_flags(table, i); 509 510 if (rc == 0) { 511 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 512 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 513 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 514 table->entry[i].handle); 515 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 516 inbuf, sizeof(inbuf), 517 NULL, 0, NULL); 518 } 519 520 if (rc == 0) { 521 kfree(saved_spec); 522 saved_spec = NULL; 523 priv_flags = 0; 524 } 525 efx_mcdi_filter_set_entry(table, i, saved_spec, 526 priv_flags); 527 } 528 } 529 530 /* If successful, return the inserted filter ID */ 531 if (rc == 0) 532 rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index); 533 534 out_unlock: 535 if (rss_locked) 536 mutex_unlock(&efx->rss_lock); 537 up_write(&table->lock); 538 return rc; 539 } 540 541 s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, 542 bool replace_equal) 543 { 544 s32 ret; 545 546 down_read(&efx->filter_sem); 547 ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal); 548 up_read(&efx->filter_sem); 549 550 return ret; 551 } 552 553 /* 554 * Remove a filter. 555 * If !by_index, remove by ID 556 * If by_index, remove by index 557 * Filter ID may come from userland and must be range-checked. 558 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock 559 * for write. 
560 */ 561 static int efx_mcdi_filter_remove_internal(struct efx_nic *efx, 562 unsigned int priority_mask, 563 u32 filter_id, bool by_index) 564 { 565 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id); 566 struct efx_mcdi_filter_table *table = efx->filter_state; 567 MCDI_DECLARE_BUF(inbuf, 568 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 569 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 570 struct efx_filter_spec *spec; 571 DEFINE_WAIT(wait); 572 int rc; 573 574 spec = efx_mcdi_filter_entry_spec(table, filter_idx); 575 if (!spec || 576 (!by_index && 577 efx_mcdi_filter_pri(table, spec) != 578 efx_mcdi_filter_get_unsafe_pri(filter_id))) 579 return -ENOENT; 580 581 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 582 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 583 /* Just remove flags */ 584 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 585 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 586 return 0; 587 } 588 589 if (!(priority_mask & (1U << spec->priority))) 590 return -ENOENT; 591 592 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 593 /* Reset to an automatic filter */ 594 595 struct efx_filter_spec new_spec = *spec; 596 597 new_spec.priority = EFX_FILTER_PRI_AUTO; 598 new_spec.flags = (EFX_FILTER_FLAG_RX | 599 (efx_rss_active(&efx->rss_context) ? 600 EFX_FILTER_FLAG_RX_RSS : 0)); 601 new_spec.dmaq_id = 0; 602 new_spec.rss_context = 0; 603 rc = efx_mcdi_filter_push(efx, &new_spec, 604 &table->entry[filter_idx].handle, 605 &efx->rss_context, 606 true); 607 608 if (rc == 0) 609 *spec = new_spec; 610 } else { 611 /* Really remove the filter */ 612 613 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 614 efx_mcdi_filter_is_exclusive(spec) ? 
615 MC_CMD_FILTER_OP_IN_OP_REMOVE : 616 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 617 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 618 table->entry[filter_idx].handle); 619 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, 620 inbuf, sizeof(inbuf), NULL, 0, NULL); 621 622 if ((rc == 0) || (rc == -ENOENT)) { 623 /* Filter removed OK or didn't actually exist */ 624 kfree(spec); 625 efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0); 626 } else { 627 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 628 MC_CMD_FILTER_OP_EXT_IN_LEN, 629 NULL, 0, rc); 630 } 631 } 632 633 return rc; 634 } 635 636 /* Remove filters that weren't renewed. */ 637 static void efx_mcdi_filter_remove_old(struct efx_nic *efx) 638 { 639 struct efx_mcdi_filter_table *table = efx->filter_state; 640 int remove_failed = 0; 641 int remove_noent = 0; 642 int rc; 643 int i; 644 645 down_write(&table->lock); 646 for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) { 647 if (READ_ONCE(table->entry[i].spec) & 648 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 649 rc = efx_mcdi_filter_remove_internal(efx, 650 1U << EFX_FILTER_PRI_AUTO, i, true); 651 if (rc == -ENOENT) 652 remove_noent++; 653 else if (rc) 654 remove_failed++; 655 } 656 } 657 up_write(&table->lock); 658 659 if (remove_failed) 660 netif_info(efx, drv, efx->net_dev, 661 "%s: failed to remove %d filters\n", 662 __func__, remove_failed); 663 if (remove_noent) 664 netif_info(efx, drv, efx->net_dev, 665 "%s: failed to remove %d non-existent filters\n", 666 __func__, remove_noent); 667 } 668 669 int efx_mcdi_filter_remove_safe(struct efx_nic *efx, 670 enum efx_filter_priority priority, 671 u32 filter_id) 672 { 673 struct efx_mcdi_filter_table *table; 674 int rc; 675 676 down_read(&efx->filter_sem); 677 table = efx->filter_state; 678 down_write(&table->lock); 679 rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id, 680 false); 681 up_write(&table->lock); 682 up_read(&efx->filter_sem); 683 return rc; 684 } 685 686 /* Caller must hold efx->filter_sem for read */ 687 
static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx, 688 enum efx_filter_priority priority, 689 u32 filter_id) 690 { 691 struct efx_mcdi_filter_table *table = efx->filter_state; 692 693 if (filter_id == EFX_EF10_FILTER_ID_INVALID) 694 return; 695 696 down_write(&table->lock); 697 efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id, 698 true); 699 up_write(&table->lock); 700 } 701 702 int efx_mcdi_filter_get_safe(struct efx_nic *efx, 703 enum efx_filter_priority priority, 704 u32 filter_id, struct efx_filter_spec *spec) 705 { 706 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id); 707 const struct efx_filter_spec *saved_spec; 708 struct efx_mcdi_filter_table *table; 709 int rc; 710 711 down_read(&efx->filter_sem); 712 table = efx->filter_state; 713 down_read(&table->lock); 714 saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx); 715 if (saved_spec && saved_spec->priority == priority && 716 efx_mcdi_filter_pri(table, saved_spec) == 717 efx_mcdi_filter_get_unsafe_pri(filter_id)) { 718 *spec = *saved_spec; 719 rc = 0; 720 } else { 721 rc = -ENOENT; 722 } 723 up_read(&table->lock); 724 up_read(&efx->filter_sem); 725 return rc; 726 } 727 728 static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx, 729 struct efx_mcdi_filter_vlan *vlan, 730 bool multicast, bool rollback) 731 { 732 struct efx_mcdi_filter_table *table = efx->filter_state; 733 struct efx_mcdi_dev_addr *addr_list; 734 enum efx_filter_flags filter_flags; 735 struct efx_filter_spec spec; 736 u8 baddr[ETH_ALEN]; 737 unsigned int i, j; 738 int addr_count; 739 u16 *ids; 740 int rc; 741 742 if (multicast) { 743 addr_list = table->dev_mc_list; 744 addr_count = table->dev_mc_count; 745 ids = vlan->mc; 746 } else { 747 addr_list = table->dev_uc_list; 748 addr_count = table->dev_uc_count; 749 ids = vlan->uc; 750 } 751 752 filter_flags = efx_rss_active(&efx->rss_context) ? 
EFX_FILTER_FLAG_RX_RSS : 0; 753 754 /* Insert/renew filters */ 755 for (i = 0; i < addr_count; i++) { 756 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 757 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 758 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 759 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 760 if (rc < 0) { 761 if (rollback) { 762 netif_info(efx, drv, efx->net_dev, 763 "efx_mcdi_filter_insert failed rc=%d\n", 764 rc); 765 /* Fall back to promiscuous */ 766 for (j = 0; j < i; j++) { 767 efx_mcdi_filter_remove_unsafe( 768 efx, EFX_FILTER_PRI_AUTO, 769 ids[j]); 770 ids[j] = EFX_EF10_FILTER_ID_INVALID; 771 } 772 return rc; 773 } else { 774 /* keep invalid ID, and carry on */ 775 } 776 } else { 777 ids[i] = efx_mcdi_filter_get_unsafe_id(rc); 778 } 779 } 780 781 if (multicast && rollback) { 782 /* Also need an Ethernet broadcast filter */ 783 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != 784 EFX_EF10_FILTER_ID_INVALID); 785 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 786 eth_broadcast_addr(baddr); 787 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 788 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 789 if (rc < 0) { 790 netif_warn(efx, drv, efx->net_dev, 791 "Broadcast filter insert failed rc=%d\n", rc); 792 /* Fall back to promiscuous */ 793 for (j = 0; j < i; j++) { 794 efx_mcdi_filter_remove_unsafe( 795 efx, EFX_FILTER_PRI_AUTO, 796 ids[j]); 797 ids[j] = EFX_EF10_FILTER_ID_INVALID; 798 } 799 return rc; 800 } else { 801 vlan->default_filters[EFX_EF10_BCAST] = 802 efx_mcdi_filter_get_unsafe_id(rc); 803 } 804 } 805 806 return 0; 807 } 808 809 static int efx_mcdi_filter_insert_def(struct efx_nic *efx, 810 struct efx_mcdi_filter_vlan *vlan, 811 enum efx_encap_type encap_type, 812 bool multicast, bool rollback) 813 { 814 struct efx_mcdi_filter_table *table = efx->filter_state; 815 enum efx_filter_flags filter_flags; 816 struct efx_filter_spec spec; 817 u8 
baddr[ETH_ALEN]; 818 int rc; 819 u16 *id; 820 821 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; 822 823 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 824 825 if (multicast) 826 efx_filter_set_mc_def(&spec); 827 else 828 efx_filter_set_uc_def(&spec); 829 830 if (encap_type) { 831 if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1)) 832 efx_filter_set_encap_type(&spec, encap_type); 833 else 834 /* 835 * don't insert encap filters on non-supporting 836 * platforms. ID will be left as INVALID. 837 */ 838 return 0; 839 } 840 841 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 842 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 843 844 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 845 if (rc < 0) { 846 const char *um = multicast ? "Multicast" : "Unicast"; 847 const char *encap_name = ""; 848 const char *encap_ipv = ""; 849 850 if ((encap_type & EFX_ENCAP_TYPES_MASK) == 851 EFX_ENCAP_TYPE_VXLAN) 852 encap_name = "VXLAN "; 853 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 854 EFX_ENCAP_TYPE_NVGRE) 855 encap_name = "NVGRE "; 856 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 857 EFX_ENCAP_TYPE_GENEVE) 858 encap_name = "GENEVE "; 859 if (encap_type & EFX_ENCAP_FLAG_IPV6) 860 encap_ipv = "IPv6 "; 861 else if (encap_type) 862 encap_ipv = "IPv4 "; 863 864 /* 865 * unprivileged functions can't insert mismatch filters 866 * for encapsulated or unicast traffic, so downgrade 867 * those warnings to debug. 
868 */ 869 netif_cond_dbg(efx, drv, efx->net_dev, 870 rc == -EPERM && (encap_type || !multicast), warn, 871 "%s%s%s mismatch filter insert failed rc=%d\n", 872 encap_name, encap_ipv, um, rc); 873 } else if (multicast) { 874 /* mapping from encap types to default filter IDs (multicast) */ 875 static enum efx_mcdi_filter_default_filters map[] = { 876 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, 877 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, 878 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, 879 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, 880 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 881 EFX_EF10_VXLAN6_MCDEF, 882 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 883 EFX_EF10_NVGRE6_MCDEF, 884 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 885 EFX_EF10_GENEVE6_MCDEF, 886 }; 887 888 /* quick bounds check (BCAST result impossible) */ 889 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 890 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 891 WARN_ON(1); 892 return -EINVAL; 893 } 894 /* then follow map */ 895 id = &vlan->default_filters[map[encap_type]]; 896 897 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 898 *id = efx_mcdi_filter_get_unsafe_id(rc); 899 if (!table->mc_chaining && !encap_type) { 900 /* Also need an Ethernet broadcast filter */ 901 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 902 filter_flags, 0); 903 eth_broadcast_addr(baddr); 904 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 905 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 906 if (rc < 0) { 907 netif_warn(efx, drv, efx->net_dev, 908 "Broadcast filter insert failed rc=%d\n", 909 rc); 910 if (rollback) { 911 /* Roll back the mc_def filter */ 912 efx_mcdi_filter_remove_unsafe( 913 efx, EFX_FILTER_PRI_AUTO, 914 *id); 915 *id = EFX_EF10_FILTER_ID_INVALID; 916 return rc; 917 } 918 } else { 919 EFX_WARN_ON_PARANOID( 920 vlan->default_filters[EFX_EF10_BCAST] != 921 EFX_EF10_FILTER_ID_INVALID); 922 vlan->default_filters[EFX_EF10_BCAST] = 923 efx_mcdi_filter_get_unsafe_id(rc); 
			}
		}
		rc = 0;
	} else {
		/* mapping from encap types to default filter IDs (unicast) */
		static enum efx_mcdi_filter_default_filters map[] = {
			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_VXLAN6_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_NVGRE6_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_GENEVE6_UCDEF,
		};

		/*
		 * quick bounds check (BCAST result impossible)
		 * EFX_EF10_BCAST is 0, so an unmapped encap_type yields
		 * map[encap_type] == 0 and is rejected below.
		 */
		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
			WARN_ON(1);
			return -EINVAL;
		}
		/* then follow map */
		id = &vlan->default_filters[map[encap_type]];
		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
		*id = rc;
		rc = 0;
	}
	return rc;
}

/*
 * Insert/renew all the unicast, multicast and encapsulated default
 * filters for a single VLAN, honouring the current promiscuous and
 * multicast-chaining state in the filter table.
 *
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
					      struct efx_mcdi_filter_vlan *vlan)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;

	/*
	 * Do not install unspecified VID if VLAN filtering is enabled.
	 * Do not install all specified VIDs if VLAN filtering is disabled.
	 */
	if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
		return;

	/* Insert/renew unicast filters */
	if (table->uc_promisc) {
		efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
					   false, false);
		efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
	} else {
		/*
		 * If any of the filters failed to insert, fall back to
		 * promiscuous mode - add in the uc_def filter.  But keep
		 * our individual unicast filters.
		 */
		if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
			efx_mcdi_filter_insert_def(efx, vlan,
						   EFX_ENCAP_TYPE_NONE,
						   false, false);
	}
	/* One unicast default filter per encap type, IPv4 and IPv6 */
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);

	/*
	 * Insert/renew multicast filters
	 *
	 * If changing promiscuous state with cascaded multicast filters, remove
	 * old filters first, so that packets are dropped rather than duplicated
	 */
	if (table->mc_chaining && table->mc_promisc_last != table->mc_promisc)
		efx_mcdi_filter_remove_old(efx);
	if (table->mc_promisc) {
		if (table->mc_chaining) {
			/*
			 * If we failed to insert promiscuous filters, rollback
			 * and fall back to individual multicast filters
			 */
			if (efx_mcdi_filter_insert_def(efx, vlan,
						       EFX_ENCAP_TYPE_NONE,
						       true, true)) {
				/* Changing promisc state, so remove old filters */
				efx_mcdi_filter_remove_old(efx);
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
			}
		} else {
			/*
			 * If we failed to insert promiscuous filters, don't
			 * rollback.  Regardless, also insert the mc_list,
			 * unless it's incomplete due to overflow
			 */
			efx_mcdi_filter_insert_def(efx, vlan,
						   EFX_ENCAP_TYPE_NONE,
						   true, false);
			if (!table->mc_overflow)
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
		}
	} else {
		/*
		 * If any filters failed to insert, rollback and fall back to
		 * promiscuous mode - mc_def filter and maybe broadcast.  If
		 * that fails, roll back again and insert as many of our
		 * individual multicast filters as we can.
		 */
		if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
			/* Changing promisc state, so remove old filters */
			if (table->mc_chaining)
				efx_mcdi_filter_remove_old(efx);
			if (efx_mcdi_filter_insert_def(efx, vlan,
						       EFX_ENCAP_TYPE_NONE,
						       true, true))
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
		}
	}
	/* Multicast default filters for each encap type, IPv4 and IPv6 */
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
}

/*
 * Remove all RX filters at priorities up to and including @priority,
 * excluding EFX_FILTER_PRI_AUTO.  -ENOENT from an individual row is
 * not an error (the row may simply be empty or above the mask).
 */
int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
			     enum efx_filter_priority priority)
{
	struct efx_mcdi_filter_table *table;
	unsigned int priority_mask;
	unsigned int i;
	int rc;

	/* Bit per priority <= @priority, with the AUTO bit masked out */
	priority_mask = (((1U << (priority + 1)) - 1) &
			 ~(1U << EFX_FILTER_PRI_AUTO));

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
		rc =
		     efx_mcdi_filter_remove_internal(efx, priority_mask,
						     i, true);
		if (rc && rc != -ENOENT)
			break;
		rc = 0;
	}

	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

/* Count the RX filters currently installed at exactly @priority. */
u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
				  enum efx_filter_priority priority)
{
	struct efx_mcdi_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);
	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		if (table->entry[filter_idx].spec &&
		    efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

/*
 * Upper bound on filter IDs: one (pri, idx) pair per supported match
 * type per table row; matches efx_mcdi_filter_make_filter_id() encoding.
 */
u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;

	return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
}

/*
 * Fill @buf with the IDs of all RX filters at @priority.  Returns the
 * number of IDs written, or -EMSGSIZE if they do not fit in @size.
 */
s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 *buf, u32 size)
{
	struct efx_mcdi_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] =
				efx_mcdi_filter_make_filter_id(
					efx_mcdi_filter_pri(table, spec),
					filter_idx);
		}
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

/*
 * Translate a firmware (MCDI) match-flags word into the driver's
 * EFX_FILTER_MATCH_* representation.  Returns the driver flags, or
 * -EINVAL if any firmware bit has no driver equivalent.
 */
static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
{
	int match_flags = 0;

/* Clear the MCDI bit; if it was set, record the corresponding driver flag */
#define MAP_FLAG(gen_flag, mcdi_field) do {				\
		u32 old_mcdi_flags = mcdi_flags;			\
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##	\
				mcdi_field ## _LBN);			\
		if (mcdi_flags != old_mcdi_flags)			\
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
	} while (0)

	if (encap) {
		/* encap filters must specify encap type */
		match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
		/* and imply ethertype and ip proto */
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
		/* VLAN tags refer to the outer packet */
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		/* everything else refers to the inner packet */
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, IFRM_SRC_IP);
		MAP_FLAG(LOC_HOST, IFRM_DST_IP);
		MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
		MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
		MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
		MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
		MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
		MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
	} else {
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, SRC_IP);
		MAP_FLAG(LOC_HOST, DST_IP);
		MAP_FLAG(REM_MAC, SRC_MAC);
		MAP_FLAG(REM_PORT, SRC_PORT);
		MAP_FLAG(LOC_MAC, DST_MAC);
		MAP_FLAG(LOC_PORT, DST_PORT);
		MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		MAP_FLAG(IP_PROTO, IP_PROTO);
	}
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}

/*
 * Check whether the firmware advertised support for a filter matching
 * exactly the given set of driver match flags.
 */
bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
				    bool encap,
				    enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;
	int mf;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++) {
		mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
				table->rx_match_mcdi_flags[match_pri]);
		if (mf == match_flags)
			return true;
	}

	return false;
}

/*
 * Query the firmware for the supported RX match types (plain or
 * encapsulated) and append the ones the driver understands to
 * table->rx_match_mcdi_flags, in firmware priority order.
 */
static int
efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
				    struct efx_mcdi_filter_table *table,
				    bool encap)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	size_t outlen;
	int rc;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       encap ?
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;

	pd_match_count = MCDI_VAR_ARRAY_LEN(
		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);

	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
		u32 mcdi_flags =
			MCDI_ARRAY_DWORD(
				outbuf,
				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
				pd_match_pri);
		rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags);
		if (rc < 0) {
			/* Not an error: firmware may support matches the
			 * driver does not; just skip them.
			 */
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u not supported in driver\n",
				  __func__, mcdi_flags, pd_match_pri);
		} else {
			netif_dbg(efx, probe, efx->net_dev,
				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
				  __func__, mcdi_flags, pd_match_pri,
				  rc, table->rx_match_count);
			table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
			table->rx_match_count++;
		}
	}

	return 0;
}

/*
 * Allocate and populate the software filter table: probe the supported
 * match types (plain and, if the NIC has the capability, encapsulated),
 * decide whether VLAN filtering can be offered, and allocate the
 * per-row entry array.  Caller must hold efx->filter_sem for write.
 */
int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
{
	struct net_device *net_dev = efx->net_dev;
	struct efx_mcdi_filter_table *table;
	int rc;

	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return -EINVAL;

	if (efx->filter_state) /* already probed */
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	table->mc_chaining = multicast_chaining;
	table->rx_match_count = 0;
	rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
	if (rc)
		goto fail;
	if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
		rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
	if (rc)
		goto fail;
	/*
	 * VLAN filtering needs both MAC+VID and MAC_IG+VID match types;
	 * without them, withdraw the feature from the net device.
	 */
	if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(efx_mcdi_filter_match_supported(table, false,
		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
	      efx_mcdi_filter_match_supported(table, false,
		(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
		netif_info(efx, probe, net_dev,
			   "VLAN filters are not supported in this firmware variant\n");
		net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
		net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS,
					  sizeof(*table->entry)));
	if (!table->entry) {
		rc = -ENOMEM;
		goto fail;
	}

	table->mc_promisc_last = false;
	table->vlan_filter =
		!!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
	INIT_LIST_HEAD(&table->vlan_list);
	init_rwsem(&table->lock);

	efx->filter_state = table;

	return 0;
fail:
	kfree(table);
	return rc;
}

/*
 * After an MC reboot all filter and RSS handles are stale; mark both
 * for re-insertion on the next restore.
 */
void efx_mcdi_filter_table_reset_mc_allocations(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;

	if (table) {
		table->must_restore_filters = true;
		table->must_restore_rss_contexts = true;
	}
}

/*
 * Re-push every software filter entry to the firmware (e.g. after an
 * MC reboot).  Entries whose match type or RSS context no longer exists
 * are dropped from the software table.
 *
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
void efx_mcdi_filter_table_restore(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	unsigned int invalid_filters = 0, failed = 0;
	struct efx_mcdi_filter_vlan *vlan;
	struct efx_filter_spec *spec;
	struct efx_rss_context *ctx;
	unsigned int filter_idx;
	u32 mcdi_flags;
	int match_pri;
	int rc, i;

	WARN_ON(!rwsem_is_locked(&efx->filter_sem));

	if (!table || !table->must_restore_filters)
		return;

	down_write(&table->lock);
	mutex_lock(&efx->rss_lock);

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		/* The filter's match type must still be supported */
		mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
		match_pri = 0;
		while (match_pri < table->rx_match_count &&
		       table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
			++match_pri;
		if (match_pri >= table->rx_match_count) {
			invalid_filters++;
			goto not_restored;
		}
		if (spec->rss_context)
			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
		else
			ctx = &efx->rss_context;
		if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
			if (!ctx) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
			if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
		}

		rc = efx_mcdi_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  ctx, false);
		if (rc)
			failed++;

		if (rc) {
not_restored:
			/* Invalidate any default-filter IDs referencing this
			 * row before dropping it from the software table.
			 */
			list_for_each_entry(vlan, &table->vlan_list, list)
				for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
					if (vlan->default_filters[i] == filter_idx)
						vlan->default_filters[i] =
							EFX_EF10_FILTER_ID_INVALID;

			kfree(spec);
			efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}

	mutex_unlock(&efx->rss_lock);
	up_write(&table->lock);

	/*
	 * This can happen validly if the MC's capabilities have changed, so
	 * is not an error.
	 */
	if (invalid_filters)
		netif_dbg(efx, drv, efx->net_dev,
			  "Did not restore %u filters that are now unsupported.\n",
			  invalid_filters);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore %u filters\n", failed);
	else
		table->must_restore_filters = false;
}

/*
 * Tear down the filter table: remove every installed filter from the
 * firmware and free the software state.  Caller must hold
 * efx->filter_sem for write.
 */
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	efx_mcdi_filter_cleanup_vlans(efx);
	efx->filter_state = NULL;
	/*
	 * If we were called without locking, then it's not safe to free
	 * the table as others might be using it.  So we just WARN, leak
	 * the memory, and potentially get an inconsistent filter table
	 * state.
	 * This should never actually happen.
	 */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_mcdi_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
					sizeof(inbuf), NULL, 0, NULL);
		if (rc)
			netif_info(efx, drv, efx->net_dev,
				   "%s: filter %04x remove failed\n",
				   __func__, filter_idx);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}

/*
 * Mark the filter identified by *id as AUTO_OLD (candidate for removal
 * by efx_mcdi_filter_remove_old()) and invalidate *id.
 */
static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	unsigned int filter_idx;

	efx_rwsem_assert_write_locked(&table->lock);

	if (*id != EFX_EF10_FILTER_ID_INVALID) {
		filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
		if (!table->entry[filter_idx].spec)
			netif_dbg(efx, drv, efx->net_dev,
				  "marked null spec old %04x:%04x\n", *id,
				  filter_idx);
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
		*id = EFX_EF10_FILTER_ID_INVALID;
	}
}

/* Mark old per-VLAN filters that may need to be removed */
static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
					   struct efx_mcdi_filter_vlan *vlan)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	unsigned int i;

	for (i = 0; i < table->dev_uc_count; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
	for (i = 0; i < table->dev_mc_count; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
}

/*
 * Mark old filters that may need to be removed.
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;

	down_write(&table->lock);
	list_for_each_entry(vlan, &table->vlan_list, list)
		_efx_mcdi_filter_vlan_mark_old(efx, vlan);
	up_write(&table->lock);
}

/*
 * Add software state for a VLAN and, if the device is registered,
 * install its filters.  Caller must hold efx->filter_sem for write.
 */
int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;
	unsigned int i;

	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return -EINVAL;

	vlan = efx_mcdi_filter_find_vlan(efx, vid);
	if (WARN_ON(vlan)) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u already added\n", vid);
		return -EALREADY;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;

	/* No filters installed yet: every slot starts out invalid */
	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
		vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
		vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;

	list_add_tail(&vlan->list, &table->vlan_list);

	if (efx_dev_registered(efx))
		efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);

	return 0;
}

/*
 * Remove all of a VLAN's filters and free its software state.
 * Caller must hold efx->filter_sem for write.
 */
static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_mcdi_filter_vlan *vlan)
{
	unsigned int i;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	list_del(&vlan->list);

	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
		efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
					      vlan->uc[i]);
	for
	    (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
		efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
					      vlan->mc[i]);
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
			efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
						      vlan->default_filters[i]);

	kfree(vlan);
}

/*
 * Remove the filters and software state for the VLAN with the given
 * VID, if present.  Caller must hold efx->filter_sem for write.
 */
void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_mcdi_filter_vlan *vlan;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	vlan = efx_mcdi_filter_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u not found in filter state\n", vid);
		return;
	}

	efx_mcdi_filter_del_vlan_internal(efx, vlan);
}

/* Look up the software VLAN state for @vid; NULL if not present. */
struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
						       u16 vid)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;

	WARN_ON(!rwsem_is_locked(&efx->filter_sem));

	list_for_each_entry(vlan, &table->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

/* Remove every VLAN's filters and state (e.g. at table teardown). */
void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan, *next_vlan;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
		efx_mcdi_filter_del_vlan_internal(efx, vlan);
}

/*
 * Snapshot the net device's unicast address list into the table,
 * falling back to promiscuous mode if the list does not fit.
 * Caller holds the netdev address lock.
 */
static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *uc;
	unsigned int i;

	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
	/* Slot 0 is always the device's own station address */
	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
	i = 1;
	netdev_for_each_uc_addr(uc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
			table->uc_promisc = true;
			break;
		}
		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
		i++;
	}

	table->dev_uc_count = i;
}

/*
 * Snapshot the net device's multicast address list into the table,
 * recording overflow so the caller can avoid installing a truncated
 * list.  Caller holds the netdev address lock.
 */
static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *mc;
	unsigned int i;

	table->mc_overflow = false;
	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));

	i = 0;
	netdev_for_each_mc_addr(mc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
			table->mc_promisc = true;
			table->mc_overflow = true;
			break;
		}
		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
		i++;
	}

	table->dev_mc_count = i;
}

/*
 * Bring the hardware filters in line with the net device's current RX
 * mode: mark existing auto filters old, re-insert per-VLAN filters, and
 * remove whatever was not renewed.
 *
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct efx_mcdi_filter_vlan *vlan;
	bool vlan_filter;

	if (!efx_dev_registered(efx))
		return;

	if (!table)
		return;

	efx_mcdi_filter_mark_old(efx);

	/*
	 * Copy/convert the address lists; add the primary station
	 * address and broadcast address
	 */
	netif_addr_lock_bh(net_dev);
	efx_mcdi_filter_uc_addr_list(efx);
	efx_mcdi_filter_mc_addr_list(efx);
	netif_addr_unlock_bh(net_dev);

	/*
	 * If VLAN filtering changes, all old filters are finally removed.
	 * Do it in advance to avoid conflicts for unicast untagged and
	 * VLAN 0 tagged filters.
	 */
	vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
	if (table->vlan_filter != vlan_filter) {
		table->vlan_filter = vlan_filter;
		efx_mcdi_filter_remove_old(efx);
	}

	list_for_each_entry(vlan, &table->vlan_list, list)
		efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);

	efx_mcdi_filter_remove_old(efx);
	table->mc_promisc_last = table->mc_promisc;
}

#ifdef CONFIG_RFS_ACCEL

/*
 * Try to expire one ARFS (accelerated RFS) filter.  Returns true if the
 * filter was removed (or did not need checking), false if it is still
 * in use and must be kept.
 */
bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				    unsigned int filter_idx)
{
	struct efx_filter_spec *spec, saved_spec;
	struct efx_mcdi_filter_table *table;
	struct efx_arfs_rule *rule = NULL;
	bool ret = true, force = false;
	u16 arfs_id;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	spec = efx_mcdi_filter_entry_spec(table, filter_idx);

	if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
		goto out_unlock;

	spin_lock_bh(&efx->rps_hash_lock);
	if (!efx->rps_hash_table) {
		/* In the absence of the table, we always return 0 to ARFS.
		 */
		arfs_id = 0;
	} else {
		rule = efx_rps_hash_find(efx, spec);
		if (!rule)
			/* ARFS table doesn't know of this filter, so remove it */
			goto expire;
		arfs_id = rule->arfs_id;
		ret = efx_rps_check_rule(rule, filter_idx, &force);
		if (force)
			goto expire;
		if (!ret) {
			spin_unlock_bh(&efx->rps_hash_lock);
			goto out_unlock;
		}
	}
	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
		ret = false;
	else if (rule)
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
expire:
	saved_spec = *spec; /* remove operation will kfree spec */
	spin_unlock_bh(&efx->rps_hash_lock);
	/*
	 * At this point (since we dropped the lock), another thread might queue
	 * up a fresh insertion request (but the actual insertion will be held
	 * up by our possession of the filter table lock).  In that case, it
	 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
	 * the rule is not removed by efx_rps_hash_del() below.
	 */
	if (ret)
		ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
						      filter_idx, true) == 0;
	/*
	 * While we can't safely dereference rule (we dropped the lock), we can
	 * still test it for NULL.
	 */
	if (ret && rule) {
		/* Expiring, so remove entry from ARFS table */
		spin_lock_bh(&efx->rps_hash_lock);
		efx_rps_hash_del(efx, &saved_spec);
		spin_unlock_bh(&efx->rps_hash_lock);
	}
out_unlock:
	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */

/* Hash on source and destination addresses */
#define RSS_MODE_HASH_ADDRS	(1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
				 1 << RSS_MODE_HASH_DST_ADDR_LBN)
/* Hash on source and destination L4 ports */
#define RSS_MODE_HASH_PORTS	(1 << RSS_MODE_HASH_SRC_PORT_LBN |\
				 1 << RSS_MODE_HASH_DST_PORT_LBN)
/*
 * Default flags for a newly-allocated RSS context: 4-tuple hashing for
 * TCP, 2-tuple for UDP and other IP, both address families.
 */
#define RSS_CONTEXT_FLAGS_DEFAULT	(1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)

int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
{
	/*
	 * Firmware had a bug (sfc bug 61952) where it would not actually
	 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
	 * This meant that it would always contain whatever was previously
	 * in the MCDI buffer.  Fortunately, all firmware versions with
	 * this bug have the same default flags value for a newly-allocated
	 * RSS context, and the only time we want to get the flags is just
	 * after allocating.  Moreover, the response has a 32-bit hole
	 * where the context ID would be in the request, so we can use an
	 * overlength buffer in the request and pre-fill the flags field
	 * with what we believe the default to be.  Thus if the firmware
	 * has the bug, it will leave our pre-filled value in the flags
	 * field of the response, and we will get the right answer.
	 *
	 * However, this does mean that this function should NOT be used if
	 * the RSS context flags might not be their defaults - it is ONLY
	 * reliably correct for a newly-allocated RSS context.
	 */
	/* Deliberately OUT_LEN-sized: see the workaround comment above */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
	size_t outlen;
	int rc;

	/* Check we have a hole for the context ID */
	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
		       RSS_CONTEXT_FLAGS_DEFAULT);
	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc == 0) {
		if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
			rc = -EIO;
		else
			*flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
	}
	return rc;
}

/*
 * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
 * If we fail, we just leave the RSS context at its default hash settings,
 * which is safe but may slightly reduce performance.
 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
 * just need to set the UDP ports flags (for both IP versions).
 */
void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
	u32 flags;

	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);

	/* Only valid just after allocation: see efx_mcdi_get_rss_context_flags() */
	if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
		return;
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
		       ctx->context_id);
	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
	if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL))
		/* Succeeded, so UDP 4-tuple is now enabled */
		ctx->rx_hash_udp_4tuple = true;
}

/*
 * Allocate an RSS context from the firmware.  An exclusive context
 * spreads over efx->rss_spread queues; a shared one is limited to a
 * power of two no larger than EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE.
 * On success ctx->context_id holds the firmware handle and
 * *context_size (if non-NULL) the number of queues spread over.
 */
static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
					     struct efx_rss_context *ctx,
					     unsigned *context_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;
	u32 alloc_type = exclusive ?
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
	unsigned rss_spread = exclusive ?
				efx->rss_spread :
				min(rounddown_pow_of_two(efx->rss_spread),
				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);

	/* With a single queue there is nothing to spread; skip allocation */
	if (!exclusive && rss_spread == 1) {
		ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
		if (context_size)
			*context_size = 1;
		return 0;
	}

	if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
		return -EOPNOTSUPP;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       efx->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	if (context_size)
		*context_size = rss_spread;

	if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
		efx_mcdi_set_rss_context_flags(efx, ctx);

	return 0;
}

/* Release an RSS context handle back to the firmware. */
static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);
	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

/*
 * Push an indirection table and Toeplitz hash key to the firmware for
 * the given RSS context.
 */
static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
					      const u32 *rx_indir_table, const u8 *key)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	/* This iterates over the length of efx->rss_context.rx_indir_table, but
	 * copies bytes from rx_indir_table.  That's because the latter is a
	 * pointer rather than an array, but should have the same length.
	 * The efx->rss_context.rx_hash_key loop below is similar.
	 */
	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

/* Free the default RSS context, if one is currently allocated. */
void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
{
	int rc;

	if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
		rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
		WARN_ON(rc != 0);
	}
	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}

/*
 * Switch the default RSS context to a shared one with the firmware's
 * default spreading.  Used when an exclusive context is unavailable.
 */
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
						     unsigned *context_size)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
						   context_size);

	if (rc != 0)
		return rc;

	table->rx_rss_context_exclusive = false;
	efx_set_default_rx_indir_table(efx, &efx->rss_context);
	return 0;
}

/*
 * (Re)configure the default RSS context as exclusive with the given
 * indirection table and key, allocating a new context if the current
 * one is shared or absent.  On failure the previous context (if any)
 * is restored.
 */
static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
							const u32 *rx_indir_table,
							const u8 *key)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	u32 old_rx_rss_context = efx->rss_context.context_id;
	int rc;

	if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
	    !table->rx_rss_context_exclusive) {
		rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
						       NULL);
		if (rc == -EOPNOTSUPP)
			return rc;
		else if (rc != 0)
			goto fail1;
	}

	rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
						rx_indir_table, key);
	if (rc != 0)
		goto fail2;

	if (efx->rss_context.context_id != old_rx_rss_context &&
	    old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
		WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
	table->rx_rss_context_exclusive = true;
	if (rx_indir_table != efx->rss_context.rx_indir_table)
		memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
		       sizeof(efx->rss_context.rx_indir_table));
	if (key != efx->rss_context.rx_hash_key)
		memcpy(efx->rss_context.rx_hash_key, key,
		       efx->type->rx_hash_key_size);

	return 0;

fail2:
	/* Roll back to the context we started with */
	if (old_rx_rss_context != efx->rss_context.context_id) {
		WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
		efx->rss_context.context_id = old_rx_rss_context;
	}
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

/*
 * Push the configuration of a (possibly user-allocated) RSS context to
 * the firmware, allocating a firmware context first if needed.  A NULL
 * @rx_indir_table means delete the context.  Caller holds efx->rss_lock.
 */
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
					struct efx_rss_context *ctx,
					const u32 *rx_indir_table,
					const u8 *key)
{
	int rc;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
		rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
		if (rc)
			return rc;
	}

	if (!rx_indir_table) /* Delete this context */
		return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);

	rc
= efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, 2096 rx_indir_table, key); 2097 if (rc) 2098 return rc; 2099 2100 memcpy(ctx->rx_indir_table, rx_indir_table, 2101 sizeof(efx->rss_context.rx_indir_table)); 2102 memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); 2103 2104 return 0; 2105 } 2106 2107 int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, 2108 struct efx_rss_context *ctx) 2109 { 2110 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); 2111 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); 2112 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 2113 size_t outlen; 2114 int rc, i; 2115 2116 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2117 2118 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != 2119 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); 2120 2121 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) 2122 return -ENOENT; 2123 2124 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, 2125 ctx->context_id); 2126 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != 2127 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); 2128 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), 2129 tablebuf, sizeof(tablebuf), &outlen); 2130 if (rc != 0) 2131 return rc; 2132 2133 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) 2134 return -EIO; 2135 2136 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) 2137 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf, 2138 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; 2139 2140 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, 2141 ctx->context_id); 2142 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != 2143 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2144 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), 2145 keybuf, sizeof(keybuf), &outlen); 2146 if (rc != 0) 2147 return rc; 2148 2149 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) 2150 return -EIO; 2151 2152 for (i = 0; i < 
ARRAY_SIZE(ctx->rx_hash_key); ++i) 2153 ctx->rx_hash_key[i] = MCDI_PTR( 2154 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; 2155 2156 return 0; 2157 } 2158 2159 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) 2160 { 2161 int rc; 2162 2163 mutex_lock(&efx->rss_lock); 2164 rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); 2165 mutex_unlock(&efx->rss_lock); 2166 return rc; 2167 } 2168 2169 void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) 2170 { 2171 struct efx_mcdi_filter_table *table = efx->filter_state; 2172 struct efx_rss_context *ctx; 2173 int rc; 2174 2175 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2176 2177 if (!table->must_restore_rss_contexts) 2178 return; 2179 2180 list_for_each_entry(ctx, &efx->rss_context.list, list) { 2181 /* previous NIC RSS context is gone */ 2182 ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 2183 /* so try to allocate a new one */ 2184 rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, 2185 ctx->rx_indir_table, 2186 ctx->rx_hash_key); 2187 if (rc) 2188 netif_warn(efx, probe, efx->net_dev, 2189 "failed to restore RSS context %u, rc=%d" 2190 "; RSS filters may fail to be applied\n", 2191 ctx->user_id, rc); 2192 } 2193 table->must_restore_rss_contexts = false; 2194 } 2195 2196 int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 2197 const u32 *rx_indir_table, 2198 const u8 *key) 2199 { 2200 int rc; 2201 2202 if (efx->rss_spread == 1) 2203 return 0; 2204 2205 if (!key) 2206 key = efx->rss_context.rx_hash_key; 2207 2208 rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key); 2209 2210 if (rc == -ENOBUFS && !user) { 2211 unsigned context_size; 2212 bool mismatch = false; 2213 size_t i; 2214 2215 for (i = 0; 2216 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch; 2217 i++) 2218 mismatch = rx_indir_table[i] != 2219 ethtool_rxfh_indir_default(i, efx->rss_spread); 2220 2221 rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size); 2222 if (rc == 0) { 2223 
if (context_size != efx->rss_spread) 2224 netif_warn(efx, probe, efx->net_dev, 2225 "Could not allocate an exclusive RSS" 2226 " context; allocated a shared one of" 2227 " different size." 2228 " Wanted %u, got %u.\n", 2229 efx->rss_spread, context_size); 2230 else if (mismatch) 2231 netif_warn(efx, probe, efx->net_dev, 2232 "Could not allocate an exclusive RSS" 2233 " context; allocated a shared one but" 2234 " could not apply custom" 2235 " indirection.\n"); 2236 else 2237 netif_info(efx, probe, efx->net_dev, 2238 "Could not allocate an exclusive RSS" 2239 " context; allocated a shared one.\n"); 2240 } 2241 } 2242 return rc; 2243 } 2244 2245 int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 2246 const u32 *rx_indir_table 2247 __attribute__ ((unused)), 2248 const u8 *key 2249 __attribute__ ((unused))) 2250 { 2251 if (user) 2252 return -EOPNOTSUPP; 2253 if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) 2254 return 0; 2255 return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL); 2256 } 2257