1 #include "mcdi_filters.h" 2 #include "mcdi.h" 3 #include "nic.h" 4 #include "rx_common.h" 5 6 /* The maximum size of a shared RSS context */ 7 /* TODO: this should really be from the mcdi protocol export */ 8 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL 9 10 #define EFX_EF10_FILTER_ID_INVALID 0xffff 11 12 /* An arbitrary search limit for the software hash table */ 13 #define EFX_EF10_FILTER_SEARCH_LIMIT 200 14 15 static struct efx_filter_spec * 16 efx_mcdi_filter_entry_spec(const struct efx_mcdi_filter_table *table, 17 unsigned int filter_idx) 18 { 19 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & 20 ~EFX_EF10_FILTER_FLAGS); 21 } 22 23 static unsigned int 24 efx_mcdi_filter_entry_flags(const struct efx_mcdi_filter_table *table, 25 unsigned int filter_idx) 26 { 27 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; 28 } 29 30 static u32 efx_mcdi_filter_get_unsafe_id(u32 filter_id) 31 { 32 WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); 33 return filter_id & (EFX_MCDI_FILTER_TBL_ROWS - 1); 34 } 35 36 static unsigned int efx_mcdi_filter_get_unsafe_pri(u32 filter_id) 37 { 38 return filter_id / (EFX_MCDI_FILTER_TBL_ROWS * 2); 39 } 40 41 static u32 efx_mcdi_filter_make_filter_id(unsigned int pri, u16 idx) 42 { 43 return pri * EFX_MCDI_FILTER_TBL_ROWS * 2 + idx; 44 } 45 46 /* 47 * Decide whether a filter should be exclusive or else should allow 48 * delivery to additional recipients. Currently we decide that 49 * filters for specific local unicast MAC and IP addresses are 50 * exclusive. 
51 */ 52 static bool efx_mcdi_filter_is_exclusive(const struct efx_filter_spec *spec) 53 { 54 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 55 !is_multicast_ether_addr(spec->loc_mac)) 56 return true; 57 58 if ((spec->match_flags & 59 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 60 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 61 if (spec->ether_type == htons(ETH_P_IP) && 62 !ipv4_is_multicast(spec->loc_host[0])) 63 return true; 64 if (spec->ether_type == htons(ETH_P_IPV6) && 65 ((const u8 *)spec->loc_host)[0] != 0xff) 66 return true; 67 } 68 69 return false; 70 } 71 72 static void 73 efx_mcdi_filter_set_entry(struct efx_mcdi_filter_table *table, 74 unsigned int filter_idx, 75 const struct efx_filter_spec *spec, 76 unsigned int flags) 77 { 78 table->entry[filter_idx].spec = (unsigned long)spec | flags; 79 } 80 81 static void 82 efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, 83 const struct efx_filter_spec *spec, 84 efx_dword_t *inbuf) 85 { 86 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 87 u32 match_fields = 0, uc_match, mc_match; 88 89 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 90 efx_mcdi_filter_is_exclusive(spec) ? 91 MC_CMD_FILTER_OP_IN_OP_INSERT : 92 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 93 94 /* 95 * Convert match flags and values. Unlike almost 96 * everything else in MCDI, these fields are in 97 * network byte order. 98 */ 99 #define COPY_VALUE(value, mcdi_field) \ 100 do { \ 101 match_fields |= \ 102 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 103 mcdi_field ## _LBN; \ 104 BUILD_BUG_ON( \ 105 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ 106 sizeof(value)); \ 107 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ 108 &value, sizeof(value)); \ 109 } while (0) 110 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ 111 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ 112 COPY_VALUE(spec->gen_field, mcdi_field); \ 113 } 114 /* 115 * Handle encap filters first. 
They will always be mismatch 116 * (unknown UC or MC) filters 117 */ 118 if (encap_type) { 119 /* 120 * ether_type and outer_ip_proto need to be variables 121 * because COPY_VALUE wants to memcpy them 122 */ 123 __be16 ether_type = 124 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 125 ETH_P_IPV6 : ETH_P_IP); 126 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; 127 u8 outer_ip_proto; 128 129 switch (encap_type & EFX_ENCAP_TYPES_MASK) { 130 case EFX_ENCAP_TYPE_VXLAN: 131 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; 132 /* fallthrough */ 133 case EFX_ENCAP_TYPE_GENEVE: 134 COPY_VALUE(ether_type, ETHER_TYPE); 135 outer_ip_proto = IPPROTO_UDP; 136 COPY_VALUE(outer_ip_proto, IP_PROTO); 137 /* 138 * We always need to set the type field, even 139 * though we're not matching on the TNI. 140 */ 141 MCDI_POPULATE_DWORD_1(inbuf, 142 FILTER_OP_EXT_IN_VNI_OR_VSID, 143 FILTER_OP_EXT_IN_VNI_TYPE, 144 vni_type); 145 break; 146 case EFX_ENCAP_TYPE_NVGRE: 147 COPY_VALUE(ether_type, ETHER_TYPE); 148 outer_ip_proto = IPPROTO_GRE; 149 COPY_VALUE(outer_ip_proto, IP_PROTO); 150 break; 151 default: 152 WARN_ON(1); 153 } 154 155 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 156 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 157 } else { 158 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 159 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 160 } 161 162 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 163 match_fields |= 164 is_multicast_ether_addr(spec->loc_mac) ? 
165 1 << mc_match : 166 1 << uc_match; 167 COPY_FIELD(REM_HOST, rem_host, SRC_IP); 168 COPY_FIELD(LOC_HOST, loc_host, DST_IP); 169 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); 170 COPY_FIELD(REM_PORT, rem_port, SRC_PORT); 171 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); 172 COPY_FIELD(LOC_PORT, loc_port, DST_PORT); 173 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); 174 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); 175 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); 176 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); 177 #undef COPY_FIELD 178 #undef COPY_VALUE 179 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, 180 match_fields); 181 } 182 183 static void efx_mcdi_filter_push_prep(struct efx_nic *efx, 184 const struct efx_filter_spec *spec, 185 efx_dword_t *inbuf, u64 handle, 186 struct efx_rss_context *ctx, 187 bool replacing) 188 { 189 struct efx_ef10_nic_data *nic_data = efx->nic_data; 190 u32 flags = spec->flags; 191 192 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); 193 194 /* If RSS filter, caller better have given us an RSS context */ 195 if (flags & EFX_FILTER_FLAG_RX_RSS) { 196 /* 197 * We don't have the ability to return an error, so we'll just 198 * log a warning and disable RSS for the filter. 199 */ 200 if (WARN_ON_ONCE(!ctx)) 201 flags &= ~EFX_FILTER_FLAG_RX_RSS; 202 else if (WARN_ON_ONCE(ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)) 203 flags &= ~EFX_FILTER_FLAG_RX_RSS; 204 } 205 206 if (replacing) { 207 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 208 MC_CMD_FILTER_OP_IN_OP_REPLACE); 209 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 210 } else { 211 efx_mcdi_filter_push_prep_set_match_fields(efx, spec, inbuf); 212 } 213 214 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); 215 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, 216 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 
217 MC_CMD_FILTER_OP_IN_RX_DEST_DROP : 218 MC_CMD_FILTER_OP_IN_RX_DEST_HOST); 219 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); 220 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, 221 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); 222 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, 223 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 224 0 : spec->dmaq_id); 225 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 226 (flags & EFX_FILTER_FLAG_RX_RSS) ? 227 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 228 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 229 if (flags & EFX_FILTER_FLAG_RX_RSS) 230 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, ctx->context_id); 231 } 232 233 static int efx_mcdi_filter_push(struct efx_nic *efx, 234 const struct efx_filter_spec *spec, u64 *handle, 235 struct efx_rss_context *ctx, bool replacing) 236 { 237 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 238 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); 239 size_t outlen; 240 int rc; 241 242 efx_mcdi_filter_push_prep(efx, spec, inbuf, *handle, ctx, replacing); 243 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 244 outbuf, sizeof(outbuf), &outlen); 245 if (rc && spec->priority != EFX_FILTER_PRI_HINT) 246 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, sizeof(inbuf), 247 outbuf, outlen, rc); 248 if (rc == 0) 249 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 250 if (rc == -ENOSPC) 251 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 252 return rc; 253 } 254 255 static u32 efx_mcdi_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) 256 { 257 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 258 unsigned int match_flags = spec->match_flags; 259 unsigned int uc_match, mc_match; 260 u32 mcdi_flags = 0; 261 262 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ 263 unsigned int old_match_flags = match_flags; \ 264 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ 265 if (match_flags != old_match_flags) \ 266 mcdi_flags |= \ 267 (1 << ((encap) ? 
\ 268 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ 269 mcdi_field ## _LBN : \ 270 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ 271 mcdi_field ## _LBN)); \ 272 } 273 /* inner or outer based on encap type */ 274 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); 275 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); 276 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); 277 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); 278 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); 279 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); 280 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); 281 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); 282 /* always outer */ 283 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); 284 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); 285 #undef MAP_FILTER_TO_MCDI_FLAG 286 287 /* special handling for encap type, and mismatch */ 288 if (encap_type) { 289 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; 290 mcdi_flags |= 291 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 292 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 293 294 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 295 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 296 } else { 297 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 298 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 299 } 300 301 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { 302 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; 303 mcdi_flags |= 304 is_multicast_ether_addr(spec->loc_mac) ? 305 1 << mc_match : 306 1 << uc_match; 307 } 308 309 /* Did we map them all? 
*/ 310 WARN_ON_ONCE(match_flags); 311 312 return mcdi_flags; 313 } 314 315 static int efx_mcdi_filter_pri(struct efx_mcdi_filter_table *table, 316 const struct efx_filter_spec *spec) 317 { 318 u32 mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec); 319 unsigned int match_pri; 320 321 for (match_pri = 0; 322 match_pri < table->rx_match_count; 323 match_pri++) 324 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) 325 return match_pri; 326 327 return -EPROTONOSUPPORT; 328 } 329 330 static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, 331 struct efx_filter_spec *spec, 332 bool replace_equal) 333 { 334 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 335 struct efx_ef10_nic_data *nic_data = efx->nic_data; 336 struct efx_mcdi_filter_table *table; 337 struct efx_filter_spec *saved_spec; 338 struct efx_rss_context *ctx = NULL; 339 unsigned int match_pri, hash; 340 unsigned int priv_flags; 341 bool rss_locked = false; 342 bool replacing = false; 343 unsigned int depth, i; 344 int ins_index = -1; 345 DEFINE_WAIT(wait); 346 bool is_mc_recip; 347 s32 rc; 348 349 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 350 table = efx->filter_state; 351 down_write(&table->lock); 352 353 /* For now, only support RX filters */ 354 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 355 EFX_FILTER_FLAG_RX) { 356 rc = -EINVAL; 357 goto out_unlock; 358 } 359 360 rc = efx_mcdi_filter_pri(table, spec); 361 if (rc < 0) 362 goto out_unlock; 363 match_pri = rc; 364 365 hash = efx_filter_spec_hash(spec); 366 is_mc_recip = efx_filter_is_mc_recipient(spec); 367 if (is_mc_recip) 368 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 369 370 if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { 371 mutex_lock(&efx->rss_lock); 372 rss_locked = true; 373 if (spec->rss_context) 374 ctx = efx_find_rss_context_entry(efx, spec->rss_context); 375 else 376 ctx = &efx->rss_context; 377 if (!ctx) { 378 rc = -ENOENT; 379 goto out_unlock; 380 } 381 if (ctx->context_id == 
EFX_MCDI_RSS_CONTEXT_INVALID) { 382 rc = -EOPNOTSUPP; 383 goto out_unlock; 384 } 385 } 386 387 /* Find any existing filters with the same match tuple or 388 * else a free slot to insert at. 389 */ 390 for (depth = 1; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 391 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); 392 saved_spec = efx_mcdi_filter_entry_spec(table, i); 393 394 if (!saved_spec) { 395 if (ins_index < 0) 396 ins_index = i; 397 } else if (efx_filter_spec_equal(spec, saved_spec)) { 398 if (spec->priority < saved_spec->priority && 399 spec->priority != EFX_FILTER_PRI_AUTO) { 400 rc = -EPERM; 401 goto out_unlock; 402 } 403 if (!is_mc_recip) { 404 /* This is the only one */ 405 if (spec->priority == 406 saved_spec->priority && 407 !replace_equal) { 408 rc = -EEXIST; 409 goto out_unlock; 410 } 411 ins_index = i; 412 break; 413 } else if (spec->priority > 414 saved_spec->priority || 415 (spec->priority == 416 saved_spec->priority && 417 replace_equal)) { 418 if (ins_index < 0) 419 ins_index = i; 420 else 421 __set_bit(depth, mc_rem_map); 422 } 423 } 424 } 425 426 /* Once we reach the maximum search depth, use the first suitable 427 * slot, or return -EBUSY if there was none 428 */ 429 if (ins_index < 0) { 430 rc = -EBUSY; 431 goto out_unlock; 432 } 433 434 /* Create a software table entry if necessary. 
*/ 435 saved_spec = efx_mcdi_filter_entry_spec(table, ins_index); 436 if (saved_spec) { 437 if (spec->priority == EFX_FILTER_PRI_AUTO && 438 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 439 /* Just make sure it won't be removed */ 440 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 441 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 442 table->entry[ins_index].spec &= 443 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 444 rc = ins_index; 445 goto out_unlock; 446 } 447 replacing = true; 448 priv_flags = efx_mcdi_filter_entry_flags(table, ins_index); 449 } else { 450 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 451 if (!saved_spec) { 452 rc = -ENOMEM; 453 goto out_unlock; 454 } 455 *saved_spec = *spec; 456 priv_flags = 0; 457 } 458 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags); 459 460 /* Actually insert the filter on the HW */ 461 rc = efx_mcdi_filter_push(efx, spec, &table->entry[ins_index].handle, 462 ctx, replacing); 463 464 if (rc == -EINVAL && nic_data->must_realloc_vis) 465 /* The MC rebooted under us, causing it to reject our filter 466 * insertion as pointing to an invalid VI (spec->dmaq_id). 467 */ 468 rc = -EAGAIN; 469 470 /* Finalise the software table entry */ 471 if (rc == 0) { 472 if (replacing) { 473 /* Update the fields that may differ */ 474 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 475 saved_spec->flags |= 476 EFX_FILTER_FLAG_RX_OVER_AUTO; 477 saved_spec->priority = spec->priority; 478 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 479 saved_spec->flags |= spec->flags; 480 saved_spec->rss_context = spec->rss_context; 481 saved_spec->dmaq_id = spec->dmaq_id; 482 } 483 } else if (!replacing) { 484 kfree(saved_spec); 485 saved_spec = NULL; 486 } else { 487 /* We failed to replace, so the old filter is still present. 488 * Roll back the software table to reflect this. In fact the 489 * efx_mcdi_filter_set_entry() call below will do the right 490 * thing, so nothing extra is needed here. 
491 */ 492 } 493 efx_mcdi_filter_set_entry(table, ins_index, saved_spec, priv_flags); 494 495 /* Remove and finalise entries for lower-priority multicast 496 * recipients 497 */ 498 if (is_mc_recip) { 499 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 500 unsigned int depth, i; 501 502 memset(inbuf, 0, sizeof(inbuf)); 503 504 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 505 if (!test_bit(depth, mc_rem_map)) 506 continue; 507 508 i = (hash + depth) & (EFX_MCDI_FILTER_TBL_ROWS - 1); 509 saved_spec = efx_mcdi_filter_entry_spec(table, i); 510 priv_flags = efx_mcdi_filter_entry_flags(table, i); 511 512 if (rc == 0) { 513 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 514 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 515 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 516 table->entry[i].handle); 517 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 518 inbuf, sizeof(inbuf), 519 NULL, 0, NULL); 520 } 521 522 if (rc == 0) { 523 kfree(saved_spec); 524 saved_spec = NULL; 525 priv_flags = 0; 526 } 527 efx_mcdi_filter_set_entry(table, i, saved_spec, 528 priv_flags); 529 } 530 } 531 532 /* If successful, return the inserted filter ID */ 533 if (rc == 0) 534 rc = efx_mcdi_filter_make_filter_id(match_pri, ins_index); 535 536 out_unlock: 537 if (rss_locked) 538 mutex_unlock(&efx->rss_lock); 539 up_write(&table->lock); 540 return rc; 541 } 542 543 s32 efx_mcdi_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec, 544 bool replace_equal) 545 { 546 s32 ret; 547 548 down_read(&efx->filter_sem); 549 ret = efx_mcdi_filter_insert_locked(efx, spec, replace_equal); 550 up_read(&efx->filter_sem); 551 552 return ret; 553 } 554 555 /* 556 * Remove a filter. 557 * If !by_index, remove by ID 558 * If by_index, remove by index 559 * Filter ID may come from userland and must be range-checked. 560 * Caller must hold efx->filter_sem for read, and efx->filter_state->lock 561 * for write. 
562 */ 563 static int efx_mcdi_filter_remove_internal(struct efx_nic *efx, 564 unsigned int priority_mask, 565 u32 filter_id, bool by_index) 566 { 567 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id); 568 struct efx_mcdi_filter_table *table = efx->filter_state; 569 MCDI_DECLARE_BUF(inbuf, 570 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 571 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 572 struct efx_filter_spec *spec; 573 DEFINE_WAIT(wait); 574 int rc; 575 576 spec = efx_mcdi_filter_entry_spec(table, filter_idx); 577 if (!spec || 578 (!by_index && 579 efx_mcdi_filter_pri(table, spec) != 580 efx_mcdi_filter_get_unsafe_pri(filter_id))) 581 return -ENOENT; 582 583 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 584 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 585 /* Just remove flags */ 586 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 587 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 588 return 0; 589 } 590 591 if (!(priority_mask & (1U << spec->priority))) 592 return -ENOENT; 593 594 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 595 /* Reset to an automatic filter */ 596 597 struct efx_filter_spec new_spec = *spec; 598 599 new_spec.priority = EFX_FILTER_PRI_AUTO; 600 new_spec.flags = (EFX_FILTER_FLAG_RX | 601 (efx_rss_active(&efx->rss_context) ? 602 EFX_FILTER_FLAG_RX_RSS : 0)); 603 new_spec.dmaq_id = 0; 604 new_spec.rss_context = 0; 605 rc = efx_mcdi_filter_push(efx, &new_spec, 606 &table->entry[filter_idx].handle, 607 &efx->rss_context, 608 true); 609 610 if (rc == 0) 611 *spec = new_spec; 612 } else { 613 /* Really remove the filter */ 614 615 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 616 efx_mcdi_filter_is_exclusive(spec) ? 
617 MC_CMD_FILTER_OP_IN_OP_REMOVE : 618 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 619 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 620 table->entry[filter_idx].handle); 621 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, 622 inbuf, sizeof(inbuf), NULL, 0, NULL); 623 624 if ((rc == 0) || (rc == -ENOENT)) { 625 /* Filter removed OK or didn't actually exist */ 626 kfree(spec); 627 efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0); 628 } else { 629 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 630 MC_CMD_FILTER_OP_EXT_IN_LEN, 631 NULL, 0, rc); 632 } 633 } 634 635 return rc; 636 } 637 638 /* Remove filters that weren't renewed. */ 639 static void efx_mcdi_filter_remove_old(struct efx_nic *efx) 640 { 641 struct efx_mcdi_filter_table *table = efx->filter_state; 642 int remove_failed = 0; 643 int remove_noent = 0; 644 int rc; 645 int i; 646 647 down_write(&table->lock); 648 for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) { 649 if (READ_ONCE(table->entry[i].spec) & 650 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 651 rc = efx_mcdi_filter_remove_internal(efx, 652 1U << EFX_FILTER_PRI_AUTO, i, true); 653 if (rc == -ENOENT) 654 remove_noent++; 655 else if (rc) 656 remove_failed++; 657 } 658 } 659 up_write(&table->lock); 660 661 if (remove_failed) 662 netif_info(efx, drv, efx->net_dev, 663 "%s: failed to remove %d filters\n", 664 __func__, remove_failed); 665 if (remove_noent) 666 netif_info(efx, drv, efx->net_dev, 667 "%s: failed to remove %d non-existent filters\n", 668 __func__, remove_noent); 669 } 670 671 int efx_mcdi_filter_remove_safe(struct efx_nic *efx, 672 enum efx_filter_priority priority, 673 u32 filter_id) 674 { 675 struct efx_mcdi_filter_table *table; 676 int rc; 677 678 down_read(&efx->filter_sem); 679 table = efx->filter_state; 680 down_write(&table->lock); 681 rc = efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id, 682 false); 683 up_write(&table->lock); 684 up_read(&efx->filter_sem); 685 return rc; 686 } 687 688 /* Caller must hold efx->filter_sem for read */ 689 
static void efx_mcdi_filter_remove_unsafe(struct efx_nic *efx, 690 enum efx_filter_priority priority, 691 u32 filter_id) 692 { 693 struct efx_mcdi_filter_table *table = efx->filter_state; 694 695 if (filter_id == EFX_EF10_FILTER_ID_INVALID) 696 return; 697 698 down_write(&table->lock); 699 efx_mcdi_filter_remove_internal(efx, 1U << priority, filter_id, 700 true); 701 up_write(&table->lock); 702 } 703 704 int efx_mcdi_filter_get_safe(struct efx_nic *efx, 705 enum efx_filter_priority priority, 706 u32 filter_id, struct efx_filter_spec *spec) 707 { 708 unsigned int filter_idx = efx_mcdi_filter_get_unsafe_id(filter_id); 709 const struct efx_filter_spec *saved_spec; 710 struct efx_mcdi_filter_table *table; 711 int rc; 712 713 down_read(&efx->filter_sem); 714 table = efx->filter_state; 715 down_read(&table->lock); 716 saved_spec = efx_mcdi_filter_entry_spec(table, filter_idx); 717 if (saved_spec && saved_spec->priority == priority && 718 efx_mcdi_filter_pri(table, saved_spec) == 719 efx_mcdi_filter_get_unsafe_pri(filter_id)) { 720 *spec = *saved_spec; 721 rc = 0; 722 } else { 723 rc = -ENOENT; 724 } 725 up_read(&table->lock); 726 up_read(&efx->filter_sem); 727 return rc; 728 } 729 730 static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx, 731 struct efx_mcdi_filter_vlan *vlan, 732 bool multicast, bool rollback) 733 { 734 struct efx_mcdi_filter_table *table = efx->filter_state; 735 struct efx_mcdi_dev_addr *addr_list; 736 enum efx_filter_flags filter_flags; 737 struct efx_filter_spec spec; 738 u8 baddr[ETH_ALEN]; 739 unsigned int i, j; 740 int addr_count; 741 u16 *ids; 742 int rc; 743 744 if (multicast) { 745 addr_list = table->dev_mc_list; 746 addr_count = table->dev_mc_count; 747 ids = vlan->mc; 748 } else { 749 addr_list = table->dev_uc_list; 750 addr_count = table->dev_uc_count; 751 ids = vlan->uc; 752 } 753 754 filter_flags = efx_rss_active(&efx->rss_context) ? 
EFX_FILTER_FLAG_RX_RSS : 0; 755 756 /* Insert/renew filters */ 757 for (i = 0; i < addr_count; i++) { 758 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 759 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 760 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 761 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 762 if (rc < 0) { 763 if (rollback) { 764 netif_info(efx, drv, efx->net_dev, 765 "efx_mcdi_filter_insert failed rc=%d\n", 766 rc); 767 /* Fall back to promiscuous */ 768 for (j = 0; j < i; j++) { 769 efx_mcdi_filter_remove_unsafe( 770 efx, EFX_FILTER_PRI_AUTO, 771 ids[j]); 772 ids[j] = EFX_EF10_FILTER_ID_INVALID; 773 } 774 return rc; 775 } else { 776 /* keep invalid ID, and carry on */ 777 } 778 } else { 779 ids[i] = efx_mcdi_filter_get_unsafe_id(rc); 780 } 781 } 782 783 if (multicast && rollback) { 784 /* Also need an Ethernet broadcast filter */ 785 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != 786 EFX_EF10_FILTER_ID_INVALID); 787 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 788 eth_broadcast_addr(baddr); 789 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 790 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 791 if (rc < 0) { 792 netif_warn(efx, drv, efx->net_dev, 793 "Broadcast filter insert failed rc=%d\n", rc); 794 /* Fall back to promiscuous */ 795 for (j = 0; j < i; j++) { 796 efx_mcdi_filter_remove_unsafe( 797 efx, EFX_FILTER_PRI_AUTO, 798 ids[j]); 799 ids[j] = EFX_EF10_FILTER_ID_INVALID; 800 } 801 return rc; 802 } else { 803 vlan->default_filters[EFX_EF10_BCAST] = 804 efx_mcdi_filter_get_unsafe_id(rc); 805 } 806 } 807 808 return 0; 809 } 810 811 static int efx_mcdi_filter_insert_def(struct efx_nic *efx, 812 struct efx_mcdi_filter_vlan *vlan, 813 enum efx_encap_type encap_type, 814 bool multicast, bool rollback) 815 { 816 struct efx_ef10_nic_data *nic_data = efx->nic_data; 817 enum efx_filter_flags filter_flags; 818 struct efx_filter_spec spec; 819 u8 
baddr[ETH_ALEN]; 820 int rc; 821 u16 *id; 822 823 filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; 824 825 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 826 827 if (multicast) 828 efx_filter_set_mc_def(&spec); 829 else 830 efx_filter_set_uc_def(&spec); 831 832 if (encap_type) { 833 if (nic_data->datapath_caps & 834 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 835 efx_filter_set_encap_type(&spec, encap_type); 836 else 837 /* 838 * don't insert encap filters on non-supporting 839 * platforms. ID will be left as INVALID. 840 */ 841 return 0; 842 } 843 844 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 845 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 846 847 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 848 if (rc < 0) { 849 const char *um = multicast ? "Multicast" : "Unicast"; 850 const char *encap_name = ""; 851 const char *encap_ipv = ""; 852 853 if ((encap_type & EFX_ENCAP_TYPES_MASK) == 854 EFX_ENCAP_TYPE_VXLAN) 855 encap_name = "VXLAN "; 856 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 857 EFX_ENCAP_TYPE_NVGRE) 858 encap_name = "NVGRE "; 859 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 860 EFX_ENCAP_TYPE_GENEVE) 861 encap_name = "GENEVE "; 862 if (encap_type & EFX_ENCAP_FLAG_IPV6) 863 encap_ipv = "IPv6 "; 864 else if (encap_type) 865 encap_ipv = "IPv4 "; 866 867 /* 868 * unprivileged functions can't insert mismatch filters 869 * for encapsulated or unicast traffic, so downgrade 870 * those warnings to debug. 
871 */ 872 netif_cond_dbg(efx, drv, efx->net_dev, 873 rc == -EPERM && (encap_type || !multicast), warn, 874 "%s%s%s mismatch filter insert failed rc=%d\n", 875 encap_name, encap_ipv, um, rc); 876 } else if (multicast) { 877 /* mapping from encap types to default filter IDs (multicast) */ 878 static enum efx_mcdi_filter_default_filters map[] = { 879 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, 880 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, 881 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, 882 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, 883 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 884 EFX_EF10_VXLAN6_MCDEF, 885 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 886 EFX_EF10_NVGRE6_MCDEF, 887 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 888 EFX_EF10_GENEVE6_MCDEF, 889 }; 890 891 /* quick bounds check (BCAST result impossible) */ 892 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 893 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 894 WARN_ON(1); 895 return -EINVAL; 896 } 897 /* then follow map */ 898 id = &vlan->default_filters[map[encap_type]]; 899 900 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 901 *id = efx_mcdi_filter_get_unsafe_id(rc); 902 if (!nic_data->workaround_26807 && !encap_type) { 903 /* Also need an Ethernet broadcast filter */ 904 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 905 filter_flags, 0); 906 eth_broadcast_addr(baddr); 907 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 908 rc = efx_mcdi_filter_insert_locked(efx, &spec, true); 909 if (rc < 0) { 910 netif_warn(efx, drv, efx->net_dev, 911 "Broadcast filter insert failed rc=%d\n", 912 rc); 913 if (rollback) { 914 /* Roll back the mc_def filter */ 915 efx_mcdi_filter_remove_unsafe( 916 efx, EFX_FILTER_PRI_AUTO, 917 *id); 918 *id = EFX_EF10_FILTER_ID_INVALID; 919 return rc; 920 } 921 } else { 922 EFX_WARN_ON_PARANOID( 923 vlan->default_filters[EFX_EF10_BCAST] != 924 EFX_EF10_FILTER_ID_INVALID); 925 vlan->default_filters[EFX_EF10_BCAST] = 926 
					efx_mcdi_filter_get_unsafe_id(rc);
			}
		}
		rc = 0;
	} else {
		/* mapping from encap types to default filter IDs (unicast) */
		static enum efx_mcdi_filter_default_filters map[] = {
			[EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
			[EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_VXLAN6_UCDEF,
			[EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_NVGRE6_UCDEF,
			[EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
				EFX_EF10_GENEVE6_UCDEF,
		};

		/* quick bounds check (BCAST result impossible) */
		BUILD_BUG_ON(EFX_EF10_BCAST != 0);
		if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
			WARN_ON(1);
			return -EINVAL;
		}
		/* then follow map */
		id = &vlan->default_filters[map[encap_type]];
		EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
		*id = rc;
		rc = 0;
	}
	return rc;
}

/*
 * Install/refresh the address-list and default (catch-all) filters for a
 * single VLAN, following the promiscuity state captured in @table.
 *
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
static void efx_mcdi_filter_vlan_sync_rx_mode(struct efx_nic *efx,
					      struct efx_mcdi_filter_vlan *vlan)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	/*
	 * Do not install unspecified VID if VLAN filtering is enabled.
	 * Do not install all specified VIDs if VLAN filtering is disabled.
	 */
	if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
		return;

	/* Insert/renew unicast filters */
	if (table->uc_promisc) {
		efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
					   false, false);
		efx_mcdi_filter_insert_addr_list(efx, vlan, false, false);
	} else {
		/*
		 * If any of the filters failed to insert, fall back to
		 * promiscuous mode - add in the uc_def filter. But keep
		 * our individual unicast filters.
		 */
		if (efx_mcdi_filter_insert_addr_list(efx, vlan, false, false))
			efx_mcdi_filter_insert_def(efx, vlan,
						   EFX_ENCAP_TYPE_NONE,
						   false, false);
	}
	/*
	 * Unicast default filters, one per encapsulation type.
	 * NOTE(review): the two trailing bools appear to be
	 * multicast/rollback (cf. the multicast section below) - confirm
	 * against efx_mcdi_filter_insert_def()'s prototype.
	 */
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
				   false, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
				   EFX_ENCAP_FLAG_IPV6,
				   false, false);

	/*
	 * Insert/renew multicast filters
	 *
	 * If changing promiscuous state with cascaded multicast filters, remove
	 * old filters first, so that packets are dropped rather than duplicated
	 */
	if (nic_data->workaround_26807 &&
	    table->mc_promisc_last != table->mc_promisc)
		efx_mcdi_filter_remove_old(efx);
	if (table->mc_promisc) {
		if (nic_data->workaround_26807) {
			/*
			 * If we failed to insert promiscuous filters, rollback
			 * and fall back to individual multicast filters
			 */
			if (efx_mcdi_filter_insert_def(efx, vlan,
						       EFX_ENCAP_TYPE_NONE,
						       true, true)) {
				/* Changing promisc state, so remove old filters */
				efx_mcdi_filter_remove_old(efx);
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
			}
		} else {
			/*
			 * If we failed to insert promiscuous filters, don't
			 * rollback. Regardless, also insert the mc_list,
			 * unless it's incomplete due to overflow
			 */
			efx_mcdi_filter_insert_def(efx, vlan,
						   EFX_ENCAP_TYPE_NONE,
						   true, false);
			if (!table->mc_overflow)
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
		}
	} else {
		/*
		 * If any filters failed to insert, rollback and fall back to
		 * promiscuous mode - mc_def filter and maybe broadcast. If
		 * that fails, roll back again and insert as many of our
		 * individual multicast filters as we can.
		 */
		if (efx_mcdi_filter_insert_addr_list(efx, vlan, true, true)) {
			/* Changing promisc state, so remove old filters */
			if (nic_data->workaround_26807)
				efx_mcdi_filter_remove_old(efx);
			if (efx_mcdi_filter_insert_def(efx, vlan,
						       EFX_ENCAP_TYPE_NONE,
						       true, true))
				efx_mcdi_filter_insert_addr_list(efx, vlan,
								 true, false);
		}
	}
	/* Multicast default filters, one per encapsulation type */
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
				   true, false);
	efx_mcdi_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
				   EFX_ENCAP_FLAG_IPV6,
				   true, false);
}

/*
 * Remove all RX filters whose priority is at or below @priority,
 * excluding AUTO filters (masked out of priority_mask below).
 */
int efx_mcdi_filter_clear_rx(struct efx_nic *efx,
			     enum efx_filter_priority priority)
{
	struct efx_mcdi_filter_table *table;
	unsigned int priority_mask;
	unsigned int i;
	int rc;

	/* Bits for all priorities <= @priority, minus EFX_FILTER_PRI_AUTO */
	priority_mask = (((1U << (priority + 1)) - 1) &
			 ~(1U << EFX_FILTER_PRI_AUTO));

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	for (i = 0; i < EFX_MCDI_FILTER_TBL_ROWS; i++) {
		rc =
		     efx_mcdi_filter_remove_internal(efx, priority_mask,
						     i, true);
		/* -ENOENT just means the row is empty; keep scanning */
		if (rc && rc != -ENOENT)
			break;
		rc = 0;
	}

	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return rc;
}

/* Count the installed RX filters that have exactly this priority */
u32 efx_mcdi_filter_count_rx_used(struct efx_nic *efx,
				  enum efx_filter_priority priority)
{
	struct efx_mcdi_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);
	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		if (table->entry[filter_idx].spec &&
		    efx_mcdi_filter_entry_spec(table, filter_idx)->priority ==
		    priority)
			++count;
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

/*
 * Upper bound on filter IDs; matches the pri * (ROWS * 2) + idx
 * encoding used by efx_mcdi_filter_make_filter_id().
 */
u32 efx_mcdi_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;

	return table->rx_match_count * EFX_MCDI_FILTER_TBL_ROWS * 2;
}

/*
 * Fill @buf with the IDs of all installed RX filters of @priority.
 * Returns the number of IDs written, or -EMSGSIZE if more than @size
 * filters match.
 */
s32 efx_mcdi_filter_get_rx_ids(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 *buf, u32 size)
{
	struct efx_mcdi_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	s32 count = 0;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_read(&table->lock);

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (spec && spec->priority == priority) {
			if (count == size) {
				count = -EMSGSIZE;
				break;
			}
			buf[count++] =
				efx_mcdi_filter_make_filter_id(
					efx_mcdi_filter_pri(table, spec),
					filter_idx);
		}
	}
	up_read(&table->lock);
	up_read(&efx->filter_sem);
	return count;
}

/*
 * Translate an MCDI match-field bitmask into the driver's
 * EFX_FILTER_MATCH_* flags.  Returns -EINVAL if any MCDI bit has no
 * driver equivalent (so the combination cannot be expressed).
 */
static int efx_mcdi_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
{
	int match_flags = 0;

	/* Clear one MCDI match bit; if it was set, record the driver flag */
#define MAP_FLAG(gen_flag, mcdi_field) do {				\
		u32 old_mcdi_flags = mcdi_flags;			\
		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##	\
				mcdi_field ## _LBN);			\
		if (mcdi_flags != old_mcdi_flags)			\
			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
	} while (0)

	if (encap) {
		/* encap filters must specify encap type */
		match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
		/* and imply ethertype and ip proto */
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
		mcdi_flags &=
			~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
		/* VLAN tags refer to the outer packet */
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		/* everything else refers to the inner packet */
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, IFRM_SRC_IP);
		MAP_FLAG(LOC_HOST, IFRM_DST_IP);
		MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
		MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
		MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
		MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
		MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
		MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
	} else {
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
		MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
		MAP_FLAG(REM_HOST, SRC_IP);
		MAP_FLAG(LOC_HOST, DST_IP);
		MAP_FLAG(REM_MAC, SRC_MAC);
		MAP_FLAG(REM_PORT, SRC_PORT);
		MAP_FLAG(LOC_MAC, DST_MAC);
		MAP_FLAG(LOC_PORT, DST_PORT);
		MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
		MAP_FLAG(INNER_VID, INNER_VLAN);
		MAP_FLAG(OUTER_VID, OUTER_VLAN);
		MAP_FLAG(IP_PROTO, IP_PROTO);
	}
#undef MAP_FLAG

	/* Did we map them all? */
	if (mcdi_flags)
		return -EINVAL;

	return match_flags;
}

/* True if firmware reported support for exactly this match-flag set */
bool efx_mcdi_filter_match_supported(struct efx_mcdi_filter_table *table,
				     bool encap,
				     enum efx_filter_match_flags match_flags)
{
	unsigned int match_pri;
	int mf;

	for (match_pri = 0;
	     match_pri < table->rx_match_count;
	     match_pri++) {
		mf = efx_mcdi_filter_match_flags_from_mcdi(encap,
				table->rx_match_mcdi_flags[match_pri]);
		if (mf == match_flags)
			return true;
	}

	return false;
}

static int
efx_mcdi_filter_table_probe_matches(struct efx_nic *efx,
				    struct efx_mcdi_filter_table *table,
				    bool encap)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
	unsigned int pd_match_pri, pd_match_count;
	size_t outlen;
	int rc;

	/* Find out which RX filter types are supported, and their priorities */
	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
		       encap ?
1256 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : 1257 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); 1258 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, 1259 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), 1260 &outlen); 1261 if (rc) 1262 return rc; 1263 1264 pd_match_count = MCDI_VAR_ARRAY_LEN( 1265 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); 1266 1267 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { 1268 u32 mcdi_flags = 1269 MCDI_ARRAY_DWORD( 1270 outbuf, 1271 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, 1272 pd_match_pri); 1273 rc = efx_mcdi_filter_match_flags_from_mcdi(encap, mcdi_flags); 1274 if (rc < 0) { 1275 netif_dbg(efx, probe, efx->net_dev, 1276 "%s: fw flags %#x pri %u not supported in driver\n", 1277 __func__, mcdi_flags, pd_match_pri); 1278 } else { 1279 netif_dbg(efx, probe, efx->net_dev, 1280 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 1281 __func__, mcdi_flags, pd_match_pri, 1282 rc, table->rx_match_count); 1283 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; 1284 table->rx_match_count++; 1285 } 1286 } 1287 1288 return 0; 1289 } 1290 1291 int efx_mcdi_filter_table_probe(struct efx_nic *efx) 1292 { 1293 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1294 struct net_device *net_dev = efx->net_dev; 1295 struct efx_mcdi_filter_table *table; 1296 struct efx_mcdi_filter_vlan *vlan; 1297 int rc; 1298 1299 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 1300 return -EINVAL; 1301 1302 if (efx->filter_state) /* already probed */ 1303 return 0; 1304 1305 table = kzalloc(sizeof(*table), GFP_KERNEL); 1306 if (!table) 1307 return -ENOMEM; 1308 1309 table->rx_match_count = 0; 1310 rc = efx_mcdi_filter_table_probe_matches(efx, table, false); 1311 if (rc) 1312 goto fail; 1313 if (nic_data->datapath_caps & 1314 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 1315 rc = efx_mcdi_filter_table_probe_matches(efx, table, true); 1316 if (rc) 1317 
goto fail; 1318 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && 1319 !(efx_mcdi_filter_match_supported(table, false, 1320 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && 1321 efx_mcdi_filter_match_supported(table, false, 1322 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { 1323 netif_info(efx, probe, net_dev, 1324 "VLAN filters are not supported in this firmware variant\n"); 1325 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 1326 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 1327 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 1328 } 1329 1330 table->entry = vzalloc(array_size(EFX_MCDI_FILTER_TBL_ROWS, 1331 sizeof(*table->entry))); 1332 if (!table->entry) { 1333 rc = -ENOMEM; 1334 goto fail; 1335 } 1336 1337 table->mc_promisc_last = false; 1338 table->vlan_filter = 1339 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 1340 INIT_LIST_HEAD(&table->vlan_list); 1341 init_rwsem(&table->lock); 1342 1343 efx->filter_state = table; 1344 1345 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 1346 rc = efx_mcdi_filter_add_vlan(efx, vlan->vid); 1347 if (rc) 1348 goto fail_add_vlan; 1349 } 1350 1351 return 0; 1352 1353 fail_add_vlan: 1354 efx_mcdi_filter_cleanup_vlans(efx); 1355 efx->filter_state = NULL; 1356 fail: 1357 kfree(table); 1358 return rc; 1359 } 1360 1361 /* 1362 * Caller must hold efx->filter_sem for read if race against 1363 * efx_mcdi_filter_table_remove() is possible 1364 */ 1365 void efx_mcdi_filter_table_restore(struct efx_nic *efx) 1366 { 1367 struct efx_mcdi_filter_table *table = efx->filter_state; 1368 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1369 unsigned int invalid_filters = 0, failed = 0; 1370 struct efx_mcdi_filter_vlan *vlan; 1371 struct efx_filter_spec *spec; 1372 struct efx_rss_context *ctx; 1373 unsigned int filter_idx; 1374 u32 mcdi_flags; 1375 int match_pri; 1376 int rc, i; 1377 1378 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 1379 1380 if 
 (!nic_data->must_restore_filters)
		return;

	if (!table)
		return;

	down_write(&table->lock);
	mutex_lock(&efx->rss_lock);

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		/*
		 * Drop filters whose match type is no longer in the table
		 * of firmware-supported combinations (see comment below
		 * about MC capabilities changing).
		 */
		mcdi_flags = efx_mcdi_filter_mcdi_flags_from_spec(spec);
		match_pri = 0;
		while (match_pri < table->rx_match_count &&
		       table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
			++match_pri;
		if (match_pri >= table->rx_match_count) {
			invalid_filters++;
			goto not_restored;
		}
		if (spec->rss_context)
			ctx = efx_find_rss_context_entry(efx, spec->rss_context);
		else
			ctx = &efx->rss_context;
		if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
			if (!ctx) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with nonexistent RSS context %u.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
			if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
				netif_warn(efx, drv, efx->net_dev,
					   "Warning: unable to restore a filter with RSS context %u as it was not created.\n",
					   spec->rss_context);
				invalid_filters++;
				goto not_restored;
			}
		}

		rc = efx_mcdi_filter_push(efx, spec,
					  &table->entry[filter_idx].handle,
					  ctx, false);
		if (rc)
			failed++;

		if (rc) {
not_restored:
			/*
			 * Forget this filter: invalidate any default-filter
			 * IDs pointing at the slot, then free its spec.
			 */
			list_for_each_entry(vlan, &table->vlan_list, list)
				for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
					if (vlan->default_filters[i] == filter_idx)
						vlan->default_filters[i] =
							EFX_EF10_FILTER_ID_INVALID;

			kfree(spec);
			efx_mcdi_filter_set_entry(table, filter_idx, NULL, 0);
		}
	}

	mutex_unlock(&efx->rss_lock);
	up_write(&table->lock);

	/*
	 * This can happen validly if the MC's capabilities have changed, so
	 * is not an error.
	 */
	if (invalid_filters)
		netif_dbg(efx, drv, efx->net_dev,
			  "Did not restore %u filters that are now unsupported.\n",
			  invalid_filters);

	if (failed)
		netif_err(efx, hw, efx->net_dev,
			  "unable to restore %u filters\n", failed);
	else
		nic_data->must_restore_filters = false;
}

/*
 * Tear down the filter table: remove every filter from the hardware and
 * free the software state.  Caller must hold efx->filter_sem for write.
 */
void efx_mcdi_filter_table_remove(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	efx_mcdi_filter_cleanup_vlans(efx);
	efx->filter_state = NULL;
	/*
	 * If we were called without locking, then it's not safe to free
	 * the table as others might be using it. So we just WARN, leak
	 * the memory, and potentially get an inconsistent filter table
	 * state.
	 * This should never actually happen.
	 */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
		spec = efx_mcdi_filter_entry_spec(table, filter_idx);
		if (!spec)
			continue;

		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
			       efx_mcdi_filter_is_exclusive(spec) ?
			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
			       table->entry[filter_idx].handle);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf,
					sizeof(inbuf), NULL, 0, NULL);
		if (rc)
			netif_info(efx, drv, efx->net_dev,
				   "%s: filter %04x remove failed\n",
				   __func__, filter_idx);
		kfree(spec);
	}

	vfree(table->entry);
	kfree(table);
}

/*
 * Flag one tracked filter ID's table entry as AUTO_OLD (candidate for
 * removal by efx_mcdi_filter_remove_old()) and forget the ID.
 */
static void efx_mcdi_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	unsigned int filter_idx;

	efx_rwsem_assert_write_locked(&table->lock);

	if (*id != EFX_EF10_FILTER_ID_INVALID) {
		filter_idx = efx_mcdi_filter_get_unsafe_id(*id);
		if (!table->entry[filter_idx].spec)
			netif_dbg(efx, drv, efx->net_dev,
				  "marked null spec old %04x:%04x\n", *id,
				  filter_idx);
		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
		*id = EFX_EF10_FILTER_ID_INVALID;
	}
}

/* Mark old per-VLAN filters that may need to be removed */
static void _efx_mcdi_filter_vlan_mark_old(struct efx_nic *efx,
					   struct efx_mcdi_filter_vlan *vlan)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	unsigned int i;

	for (i = 0; i < table->dev_uc_count; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->uc[i]);
	for (i = 0; i < table->dev_mc_count; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->mc[i]);
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		efx_mcdi_filter_mark_one_old(efx, &vlan->default_filters[i]);
}

/*
 * Mark old filters that may need to be removed.
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
static void efx_mcdi_filter_mark_old(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;

	down_write(&table->lock);
	list_for_each_entry(vlan, &table->vlan_list, list)
		_efx_mcdi_filter_vlan_mark_old(efx, vlan);
	up_write(&table->lock);
}

/*
 * Start tracking VLAN @vid in the filter table and, if the netdev is
 * registered, install its filters.  Caller must hold efx->filter_sem
 * for write.  Returns -EALREADY if the VID is already tracked.
 */
int efx_mcdi_filter_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;
	unsigned int i;

	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return -EINVAL;

	vlan = efx_mcdi_filter_find_vlan(efx, vid);
	if (WARN_ON(vlan)) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u already added\n", vid);
		return -EALREADY;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;

	/* No filters installed yet: every tracked ID starts invalid */
	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
		vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
	for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
		vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;

	list_add_tail(&vlan->list, &table->vlan_list);

	if (efx_dev_registered(efx))
		efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);

	return 0;
}

/* Remove a VLAN's filters and free its tracking state */
static void efx_mcdi_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_mcdi_filter_vlan *vlan)
{
	unsigned int i;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	list_del(&vlan->list);

	for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
		efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
					      vlan->uc[i]);
	for
 (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
		efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
					      vlan->mc[i]);
	for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
		if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
			efx_mcdi_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
						      vlan->default_filters[i]);

	kfree(vlan);
}

/* Stop tracking VLAN @vid; logs an error if it was never added */
void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_mcdi_filter_vlan *vlan;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	vlan = efx_mcdi_filter_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u not found in filter state\n", vid);
		return;
	}

	efx_mcdi_filter_del_vlan_internal(efx, vlan);
}

/* Look up the tracking state for VLAN @vid, or NULL if not tracked */
struct efx_mcdi_filter_vlan *efx_mcdi_filter_find_vlan(struct efx_nic *efx,
						       u16 vid)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan;

	WARN_ON(!rwsem_is_locked(&efx->filter_sem));

	list_for_each_entry(vlan, &table->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

/* Remove every tracked VLAN and its filters (used on table teardown) */
void efx_mcdi_filter_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct efx_mcdi_filter_vlan *vlan, *next_vlan;

	/* See comment in efx_mcdi_filter_table_remove() */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return;

	if (!table)
		return;

	list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
		efx_mcdi_filter_del_vlan_internal(efx, vlan);
}

/*
 * Snapshot the netdev's unicast address list into @table.  Sets
 * uc_promisc if IFF_PROMISC is set or the list overflows
 * EFX_EF10_FILTER_DEV_UC_MAX.
 */
static void efx_mcdi_filter_uc_addr_list(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *uc;
	unsigned int i;

	table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
	/* Slot 0 is always the primary station address */
	ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
	i = 1;
	netdev_for_each_uc_addr(uc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
			table->uc_promisc = true;
			break;
		}
		ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
		i++;
	}

	table->dev_uc_count = i;
}

/*
 * Snapshot the netdev's multicast address list into @table.  Sets
 * mc_promisc for IFF_PROMISC/IFF_ALLMULTI and both mc_promisc and
 * mc_overflow when the list overflows EFX_EF10_FILTER_DEV_MC_MAX.
 */
static void efx_mcdi_filter_mc_addr_list(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *mc;
	unsigned int i;

	table->mc_overflow = false;
	table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));

	i = 0;
	netdev_for_each_mc_addr(mc, net_dev) {
		if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
			table->mc_promisc = true;
			table->mc_overflow = true;
			break;
		}
		ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
		i++;
	}

	table->dev_mc_count = i;
}

/*
 * Caller must hold efx->filter_sem for read if race against
 * efx_mcdi_filter_table_remove() is possible
 */
void efx_mcdi_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct efx_mcdi_filter_table *table = efx->filter_state;
	struct net_device *net_dev = efx->net_dev;
	struct efx_mcdi_filter_vlan *vlan;
	bool vlan_filter;

	if (!efx_dev_registered(efx))
		return;

	if (!table)
		return;

	efx_mcdi_filter_mark_old(efx);

	/*
	 * Copy/convert the address lists; add the primary station
	 * address and broadcast address
	 */
	netif_addr_lock_bh(net_dev);
	efx_mcdi_filter_uc_addr_list(efx);
	efx_mcdi_filter_mc_addr_list(efx);
	netif_addr_unlock_bh(net_dev);

	/*
	 * If VLAN filtering changes, all old filters are finally removed.
	 * Do it in advance to avoid conflicts for unicast untagged and
	 * VLAN 0 tagged filters.
	 */
	vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
	if (table->vlan_filter != vlan_filter) {
		table->vlan_filter = vlan_filter;
		efx_mcdi_filter_remove_old(efx);
	}

	list_for_each_entry(vlan, &table->vlan_list, list)
		efx_mcdi_filter_vlan_sync_rx_mode(efx, vlan);

	efx_mcdi_filter_remove_old(efx);
	table->mc_promisc_last = table->mc_promisc;
}

#ifdef CONFIG_RFS_ACCEL

/*
 * Try to expire the aRFS (PRI_HINT) filter in slot @filter_idx.
 * Consults the rps_hash bookkeeping and rps_may_expire_flow(); on
 * expiry, removes the filter from hardware and the ARFS table.
 * Returns true unless the flow is still active (then false).
 */
bool efx_mcdi_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				    unsigned int filter_idx)
{
	struct efx_filter_spec *spec, saved_spec;
	struct efx_mcdi_filter_table *table;
	struct efx_arfs_rule *rule = NULL;
	bool ret = true, force = false;
	u16 arfs_id;

	down_read(&efx->filter_sem);
	table = efx->filter_state;
	down_write(&table->lock);
	spec = efx_mcdi_filter_entry_spec(table, filter_idx);

	if (!spec || spec->priority != EFX_FILTER_PRI_HINT)
		goto out_unlock;

	spin_lock_bh(&efx->rps_hash_lock);
	if (!efx->rps_hash_table) {
		/* In the absence of the table, we always return 0 to ARFS. */
		arfs_id = 0;
	} else {
		rule = efx_rps_hash_find(efx, spec);
		if (!rule)
			/* ARFS table doesn't know of this filter, so remove it */
			goto expire;
		arfs_id = rule->arfs_id;
		ret = efx_rps_check_rule(rule, filter_idx, &force);
		if (force)
			goto expire;
		if (!ret) {
			spin_unlock_bh(&efx->rps_hash_lock);
			goto out_unlock;
		}
	}
	if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id))
		ret = false;
	else if (rule)
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
expire:
	saved_spec = *spec; /* remove operation will kfree spec */
	spin_unlock_bh(&efx->rps_hash_lock);
	/*
	 * At this point (since we dropped the lock), another thread might queue
	 * up a fresh insertion request (but the actual insertion will be held
	 * up by our possession of the filter table lock). In that case, it
	 * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that
	 * the rule is not removed by efx_rps_hash_del() below.
	 */
	if (ret)
		ret = efx_mcdi_filter_remove_internal(efx, 1U << spec->priority,
						      filter_idx, true) == 0;
	/*
	 * While we can't safely dereference rule (we dropped the lock), we can
	 * still test it for NULL.
	 */
	if (ret && rule) {
		/* Expiring, so remove entry from ARFS table */
		spin_lock_bh(&efx->rps_hash_lock);
		efx_rps_hash_del(efx, &saved_spec);
		spin_unlock_bh(&efx->rps_hash_lock);
	}
out_unlock:
	up_write(&table->lock);
	up_read(&efx->filter_sem);
	return ret;
}

#endif /* CONFIG_RFS_ACCEL */

#define RSS_MODE_HASH_ADDRS	(1 << RSS_MODE_HASH_SRC_ADDR_LBN |\
				 1 << RSS_MODE_HASH_DST_ADDR_LBN)
#define RSS_MODE_HASH_PORTS	(1 << RSS_MODE_HASH_SRC_PORT_LBN |\
				 1 << RSS_MODE_HASH_DST_PORT_LBN)
#define RSS_CONTEXT_FLAGS_DEFAULT	(1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\
					 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\
					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\
					 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
					 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)

int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
{
	/*
	 * Firmware had a bug (sfc bug 61952) where it would not actually
	 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS.
	 * This meant that it would always contain whatever was previously
	 * in the MCDI buffer.
 Fortunately, all firmware versions with
	 * this bug have the same default flags value for a newly-allocated
	 * RSS context, and the only time we want to get the flags is just
	 * after allocating.  Moreover, the response has a 32-bit hole
	 * where the context ID would be in the request, so we can use an
	 * overlength buffer in the request and pre-fill the flags field
	 * with what we believe the default to be.  Thus if the firmware
	 * has the bug, it will leave our pre-filled value in the flags
	 * field of the response, and we will get the right answer.
	 *
	 * However, this does mean that this function should NOT be used if
	 * the RSS context flags might not be their defaults - it is ONLY
	 * reliably correct for a newly-allocated RSS context.
	 */
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN);
	size_t outlen;
	int rc;

	/* Check we have a hole for the context ID */
	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS,
		       RSS_CONTEXT_FLAGS_DEFAULT);
	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf,
			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
	if (rc == 0) {
		if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN)
			rc = -EIO;
		else
			*flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS);
	}
	return rc;
}

/*
 * Attempt to enable 4-tuple UDP hashing on the specified RSS context.
 * If we fail, we just leave the RSS context at its default hash settings,
 * which is safe but may slightly reduce performance.
 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
 * just need to set the UDP ports flags (for both IP versions).
 */
void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
	u32 flags;

	BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0);

	if (efx_mcdi_get_rss_context_flags(efx, ctx->context_id, &flags) != 0)
		return;
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
		       ctx->context_id);
	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN;
	flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN;
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags);
	if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
			  NULL, 0, NULL))
		/* Succeeded, so UDP 4-tuple is now enabled */
		ctx->rx_hash_udp_4tuple = true;
}

/*
 * Allocate an RSS context via MCDI.  An exclusive context spans all of
 * efx->rss_spread queues; a shared one is limited to a power of two no
 * larger than EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE.  On success,
 * *context_size (if non-NULL) is set to the spread actually used.
 */
static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
					     struct efx_rss_context *ctx,
					     unsigned *context_size)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;
	u32 alloc_type = exclusive ?
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
				MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
	unsigned rss_spread = exclusive ?
				efx->rss_spread :
				min(rounddown_pow_of_two(efx->rss_spread),
				    EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);

	/* With a single queue there is nothing to spread over */
	if (!exclusive && rss_spread == 1) {
		ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
		if (context_size)
			*context_size = 1;
		return 0;
	}

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
		return -EOPNOTSUPP;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       nic_data->vport_id);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	ctx->context_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	if (context_size)
		*context_size = rss_spread;

	if (nic_data->datapath_caps &
	    1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN)
		efx_mcdi_set_rss_context_flags(efx, ctx);

	return 0;
}

/* Free an RSS context previously allocated by the MC */
static int efx_mcdi_filter_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);
	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

/* Push an indirection table and Toeplitz hash key to an RSS context */
static int efx_mcdi_filter_populate_rss_table(struct efx_nic *efx, u32 context,
					      const u32 *rx_indir_table, const u8 *key)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	/* This iterates over the length of efx->rss_context.rx_indir_table, but
	 * copies bytes from rx_indir_table. That's because the latter is a
	 * pointer rather than an array, but should have the same length.
	 * The efx->rss_context.rx_hash_key loop below is similar.
	 */
	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rss_context.rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

/* Release the default RSS context, if any, and mark it invalid */
void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
{
	int rc;

	if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
		rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
		WARN_ON(rc != 0);
	}
	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}

/* Allocate a shared RSS context and install the default indirection table */
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
						     unsigned *context_size)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
						   context_size);

	if (rc != 0)
		return rc;

	nic_data->rx_rss_context_exclusive = false;
	efx_set_default_rx_indir_table(efx, &efx->rss_context);
	return 0;
}

/*
 * Allocate (if not already exclusive) an exclusive RSS context and
 * program it with the given indirection table and key.  On success the
 * previous context, if different, is freed; on failure a newly-allocated
 * context is freed again and the old context ID restored.
 */
static int
efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
					     const u32 *rx_indir_table,
					     const u8 *key)
{
	u32 old_rx_rss_context = efx->rss_context.context_id;
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
	    !nic_data->rx_rss_context_exclusive) {
		rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
						       NULL);
		if (rc == -EOPNOTSUPP)
			return rc;
		else if (rc != 0)
			goto fail1;
	}

	rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
						rx_indir_table, key);
	if (rc != 0)
		goto fail2;

	if (efx->rss_context.context_id != old_rx_rss_context &&
	    old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
		WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
	nic_data->rx_rss_context_exclusive = true;
	/* Cache the new table/key unless the caller passed our own copies */
	if (rx_indir_table != efx->rss_context.rx_indir_table)
		memcpy(efx->rss_context.rx_indir_table, rx_indir_table,
		       sizeof(efx->rss_context.rx_indir_table));
	if (key != efx->rss_context.rx_hash_key)
		memcpy(efx->rss_context.rx_hash_key, key,
		       efx->type->rx_hash_key_size);

	return 0;

fail2:
	if (old_rx_rss_context != efx->rss_context.context_id) {
		WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
		efx->rss_context.context_id = old_rx_rss_context;
	}
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
					struct efx_rss_context *ctx,
					const u32 *rx_indir_table,
					const u8 *key)
{
	int rc;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
		rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
		if (rc)
			return rc;
	}
2105 2106 if (!rx_indir_table) /* Delete this context */ 2107 return efx_mcdi_filter_free_rss_context(efx, ctx->context_id); 2108 2109 rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, 2110 rx_indir_table, key); 2111 if (rc) 2112 return rc; 2113 2114 memcpy(ctx->rx_indir_table, rx_indir_table, 2115 sizeof(efx->rss_context.rx_indir_table)); 2116 memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); 2117 2118 return 0; 2119 } 2120 2121 int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, 2122 struct efx_rss_context *ctx) 2123 { 2124 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); 2125 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); 2126 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 2127 size_t outlen; 2128 int rc, i; 2129 2130 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2131 2132 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != 2133 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); 2134 2135 if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) 2136 return -ENOENT; 2137 2138 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, 2139 ctx->context_id); 2140 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != 2141 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); 2142 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), 2143 tablebuf, sizeof(tablebuf), &outlen); 2144 if (rc != 0) 2145 return rc; 2146 2147 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) 2148 return -EIO; 2149 2150 for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) 2151 ctx->rx_indir_table[i] = MCDI_PTR(tablebuf, 2152 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; 2153 2154 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, 2155 ctx->context_id); 2156 BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != 2157 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2158 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), 2159 keybuf, sizeof(keybuf), &outlen); 2160 if (rc != 0) 
2161 return rc; 2162 2163 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) 2164 return -EIO; 2165 2166 for (i = 0; i < ARRAY_SIZE(ctx->rx_hash_key); ++i) 2167 ctx->rx_hash_key[i] = MCDI_PTR( 2168 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; 2169 2170 return 0; 2171 } 2172 2173 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) 2174 { 2175 int rc; 2176 2177 mutex_lock(&efx->rss_lock); 2178 rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); 2179 mutex_unlock(&efx->rss_lock); 2180 return rc; 2181 } 2182 2183 void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) 2184 { 2185 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2186 struct efx_rss_context *ctx; 2187 int rc; 2188 2189 WARN_ON(!mutex_is_locked(&efx->rss_lock)); 2190 2191 if (!nic_data->must_restore_rss_contexts) 2192 return; 2193 2194 list_for_each_entry(ctx, &efx->rss_context.list, list) { 2195 /* previous NIC RSS context is gone */ 2196 ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; 2197 /* so try to allocate a new one */ 2198 rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, 2199 ctx->rx_indir_table, 2200 ctx->rx_hash_key); 2201 if (rc) 2202 netif_warn(efx, probe, efx->net_dev, 2203 "failed to restore RSS context %u, rc=%d" 2204 "; RSS filters may fail to be applied\n", 2205 ctx->user_id, rc); 2206 } 2207 nic_data->must_restore_rss_contexts = false; 2208 } 2209 2210 int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 2211 const u32 *rx_indir_table, 2212 const u8 *key) 2213 { 2214 int rc; 2215 2216 if (efx->rss_spread == 1) 2217 return 0; 2218 2219 if (!key) 2220 key = efx->rss_context.rx_hash_key; 2221 2222 rc = efx_mcdi_filter_rx_push_exclusive_rss_config(efx, rx_indir_table, key); 2223 2224 if (rc == -ENOBUFS && !user) { 2225 unsigned context_size; 2226 bool mismatch = false; 2227 size_t i; 2228 2229 for (i = 0; 2230 i < ARRAY_SIZE(efx->rss_context.rx_indir_table) && !mismatch; 2231 i++) 2232 mismatch = rx_indir_table[i] != 2233 
ethtool_rxfh_indir_default(i, efx->rss_spread); 2234 2235 rc = efx_mcdi_filter_rx_push_shared_rss_config(efx, &context_size); 2236 if (rc == 0) { 2237 if (context_size != efx->rss_spread) 2238 netif_warn(efx, probe, efx->net_dev, 2239 "Could not allocate an exclusive RSS" 2240 " context; allocated a shared one of" 2241 " different size." 2242 " Wanted %u, got %u.\n", 2243 efx->rss_spread, context_size); 2244 else if (mismatch) 2245 netif_warn(efx, probe, efx->net_dev, 2246 "Could not allocate an exclusive RSS" 2247 " context; allocated a shared one but" 2248 " could not apply custom" 2249 " indirection.\n"); 2250 else 2251 netif_info(efx, probe, efx->net_dev, 2252 "Could not allocate an exclusive RSS" 2253 " context; allocated a shared one.\n"); 2254 } 2255 } 2256 return rc; 2257 } 2258 2259 int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 2260 const u32 *rx_indir_table 2261 __attribute__ ((unused)), 2262 const u8 *key 2263 __attribute__ ((unused))) 2264 { 2265 if (user) 2266 return -EOPNOTSUPP; 2267 if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) 2268 return 0; 2269 return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL); 2270 } 2271