/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include "selftest.h"
#include "ef10_sriov.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
/* The maximum size of a shared RSS context */
/* TODO: this should really be from the mcdi protocol export */
#define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL

/* The filter table(s) are managed by firmware and we have write-only
 * access. When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter. Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192

#define EFX_EF10_FILTER_ID_INVALID 0xffff

#define EFX_EF10_FILTER_DEV_UC_MAX	32
#define EFX_EF10_FILTER_DEV_MC_MAX	256

/* VLAN list entry */
struct efx_ef10_vlan {
	struct list_head list;
	u16 vid;
};

enum efx_ef10_default_filters {
	EFX_EF10_BCAST,
	EFX_EF10_UCDEF,
	EFX_EF10_MCDEF,
	EFX_EF10_VXLAN4_UCDEF,
	EFX_EF10_VXLAN4_MCDEF,
	EFX_EF10_VXLAN6_UCDEF,
	EFX_EF10_VXLAN6_MCDEF,
	EFX_EF10_NVGRE4_UCDEF,
	EFX_EF10_NVGRE4_MCDEF,
	EFX_EF10_NVGRE6_UCDEF,
	EFX_EF10_NVGRE6_MCDEF,
	EFX_EF10_GENEVE4_UCDEF,
	EFX_EF10_GENEVE4_MCDEF,
	EFX_EF10_GENEVE6_UCDEF,
	EFX_EF10_GENEVE6_MCDEF,

	EFX_EF10_NUM_DEFAULT_FILTERS
};

/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
	struct list_head list;
	u16 vid;
	u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
	u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
	u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};

struct efx_ef10_dev_addr {
	u8 addr[ETH_ALEN];
};

struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
	u32 rx_match_mcdi_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress. AUTO_OLD is
 * used to mark and sweep MAC filters for the device address lists.
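 * Both flags are packed into the low bits of 'spec' above (see
 * EFX_EF10_FILTER_FLAGS below); masking them off recovers the
 * struct efx_filter_spec pointer itself.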
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_AUTO_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
	struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
	struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
	int dev_uc_count;
	int dev_mc_count;
	bool uc_promisc;
	bool mc_promisc;
/* Whether in multicast promiscuous mode when last changed */
	bool mc_promisc_last;
	bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */
	bool vlan_filter;
	struct list_head vlan_list;
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
					      struct efx_ef10_filter_vlan *vlan);
static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);

static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
{
	WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
	return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
}

static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
{
	return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
}

static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
{
	return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
}

static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

/* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for
 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O
 * bar; PFs use BAR 0/1 for memory.
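 * efx_ef10_pf_mem_bar() below selects the memory BAR from the PCI device ID
 * (0x0b03 is the SFC9250 PF); efx_ef10_vf_mem_bar() always returns BAR 0.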
166 */ 167 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) 168 { 169 switch (efx->pci_dev->device) { 170 case 0x0b03: /* SFC9250 PF */ 171 return 0; 172 default: 173 return 2; 174 } 175 } 176 177 /* All VFs use BAR 0/1 for memory */ 178 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) 179 { 180 return 0; 181 } 182 183 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) 184 { 185 int bar; 186 187 bar = efx->type->mem_bar(efx); 188 return resource_size(&efx->pci_dev->resource[bar]); 189 } 190 191 static bool efx_ef10_is_vf(struct efx_nic *efx) 192 { 193 return efx->type->is_vf; 194 } 195 196 static int efx_ef10_get_pf_index(struct efx_nic *efx) 197 { 198 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 199 struct efx_ef10_nic_data *nic_data = efx->nic_data; 200 size_t outlen; 201 int rc; 202 203 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 204 sizeof(outbuf), &outlen); 205 if (rc) 206 return rc; 207 if (outlen < sizeof(outbuf)) 208 return -EIO; 209 210 nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); 211 return 0; 212 } 213 214 #ifdef CONFIG_SFC_SRIOV 215 static int efx_ef10_get_vf_index(struct efx_nic *efx) 216 { 217 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 218 struct efx_ef10_nic_data *nic_data = efx->nic_data; 219 size_t outlen; 220 int rc; 221 222 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 223 sizeof(outbuf), &outlen); 224 if (rc) 225 return rc; 226 if (outlen < sizeof(outbuf)) 227 return -EIO; 228 229 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); 230 return 0; 231 } 232 #endif 233 234 static int efx_ef10_init_datapath_caps(struct efx_nic *efx) 235 { 236 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); 237 struct efx_ef10_nic_data *nic_data = efx->nic_data; 238 size_t outlen; 239 int rc; 240 241 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 242 243 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 244 outbuf, sizeof(outbuf), &outlen); 245 if (rc) 246 return rc; 247 if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 248 netif_err(efx, drv, efx->net_dev, 249 "unable to read datapath firmware capabilities\n"); 250 return -EIO; 251 } 252 253 nic_data->datapath_caps = 254 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 255 256 if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { 257 nic_data->datapath_caps2 = MCDI_DWORD(outbuf, 258 GET_CAPABILITIES_V2_OUT_FLAGS2); 259 nic_data->piobuf_size = MCDI_WORD(outbuf, 260 GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); 261 } else { 262 nic_data->datapath_caps2 = 0; 263 nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; 264 } 265 266 /* record the DPCPU firmware IDs to determine VEB vswitching support. 
267 */ 268 nic_data->rx_dpcpu_fw_id = 269 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 270 nic_data->tx_dpcpu_fw_id = 271 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 272 273 if (!(nic_data->datapath_caps & 274 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 275 netif_err(efx, probe, efx->net_dev, 276 "current firmware does not support an RX prefix\n"); 277 return -ENODEV; 278 } 279 280 if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { 281 u8 vi_window_mode = MCDI_BYTE(outbuf, 282 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); 283 284 switch (vi_window_mode) { 285 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: 286 efx->vi_stride = 8192; 287 break; 288 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: 289 efx->vi_stride = 16384; 290 break; 291 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: 292 efx->vi_stride = 65536; 293 break; 294 default: 295 netif_err(efx, probe, efx->net_dev, 296 "Unrecognised VI window mode %d\n", 297 vi_window_mode); 298 return -EIO; 299 } 300 netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n", 301 efx->vi_stride); 302 } else { 303 /* keep default VI stride */ 304 netif_dbg(efx, probe, efx->net_dev, 305 "firmware did not report VI window mode, assuming vi_stride = %u\n", 306 efx->vi_stride); 307 } 308 309 if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { 310 efx->num_mac_stats = MCDI_WORD(outbuf, 311 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); 312 netif_dbg(efx, probe, efx->net_dev, 313 "firmware reports num_mac_stats = %u\n", 314 efx->num_mac_stats); 315 } else { 316 /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ 317 netif_dbg(efx, probe, efx->net_dev, 318 "firmware did not report num_mac_stats, assuming %u\n", 319 efx->num_mac_stats); 320 } 321 322 return 0; 323 } 324 325 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) 326 { 327 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); 328 int rc; 329 330 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, 331 outbuf, sizeof(outbuf), NULL); 332 if (rc) 333 return rc; 334 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); 335 return rc > 0 ? rc : -ERANGE; 336 } 337 338 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) 339 { 340 struct efx_ef10_nic_data *nic_data = efx->nic_data; 341 unsigned int implemented; 342 unsigned int enabled; 343 int rc; 344 345 nic_data->workaround_35388 = false; 346 nic_data->workaround_61265 = false; 347 348 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 349 350 if (rc == -ENOSYS) { 351 /* Firmware without GET_WORKAROUNDS - not a problem. */ 352 rc = 0; 353 } else if (rc == 0) { 354 /* Bug61265 workaround is always enabled if implemented. */ 355 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) 356 nic_data->workaround_61265 = true; 357 358 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 359 nic_data->workaround_35388 = true; 360 } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 361 /* Workaround is implemented but not enabled. 362 * Try to enable it. 363 */ 364 rc = efx_mcdi_set_workaround(efx, 365 MC_CMD_WORKAROUND_BUG35388, 366 true, NULL); 367 if (rc == 0) 368 nic_data->workaround_35388 = true; 369 /* If we failed to set the workaround just carry on. */ 370 rc = 0; 371 } 372 } 373 374 netif_dbg(efx, probe, efx->net_dev, 375 "workaround for bug 35388 is %sabled\n", 376 nic_data->workaround_35388 ? "en" : "dis"); 377 netif_dbg(efx, probe, efx->net_dev, 378 "workaround for bug 61265 is %sabled\n", 379 nic_data->workaround_61265 ? 
"en" : "dis"); 380 381 return rc; 382 } 383 384 static void efx_ef10_process_timer_config(struct efx_nic *efx, 385 const efx_dword_t *data) 386 { 387 unsigned int max_count; 388 389 if (EFX_EF10_WORKAROUND_61265(efx)) { 390 efx->timer_quantum_ns = MCDI_DWORD(data, 391 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); 392 efx->timer_max_ns = MCDI_DWORD(data, 393 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); 394 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 395 efx->timer_quantum_ns = MCDI_DWORD(data, 396 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); 397 max_count = MCDI_DWORD(data, 398 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); 399 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 400 } else { 401 efx->timer_quantum_ns = MCDI_DWORD(data, 402 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); 403 max_count = MCDI_DWORD(data, 404 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); 405 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 406 } 407 408 netif_dbg(efx, probe, efx->net_dev, 409 "got timer properties from MC: quantum %u ns; max %u ns\n", 410 efx->timer_quantum_ns, efx->timer_max_ns); 411 } 412 413 static int efx_ef10_get_timer_config(struct efx_nic *efx) 414 { 415 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); 416 int rc; 417 418 rc = efx_ef10_get_timer_workarounds(efx); 419 if (rc) 420 return rc; 421 422 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, 423 outbuf, sizeof(outbuf), NULL); 424 425 if (rc == 0) { 426 efx_ef10_process_timer_config(efx, outbuf); 427 } else if (rc == -ENOSYS || rc == -EPERM) { 428 /* Not available - fall back to Huntington defaults. */ 429 unsigned int quantum; 430 431 rc = efx_ef10_get_sysclk_freq(efx); 432 if (rc < 0) 433 return rc; 434 435 quantum = 1536000 / rc; /* 1536 cycles */ 436 efx->timer_quantum_ns = quantum; 437 efx->timer_max_ns = efx->type->timer_period_max * quantum; 438 rc = 0; 439 } else { 440 efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, 441 MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, 442 NULL, 0, rc); 443 } 444 445 return rc; 446 } 447 448 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) 449 { 450 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); 451 size_t outlen; 452 int rc; 453 454 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); 455 456 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, 457 outbuf, sizeof(outbuf), &outlen); 458 if (rc) 459 return rc; 460 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 461 return -EIO; 462 463 ether_addr_copy(mac_address, 464 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); 465 return 0; 466 } 467 468 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) 469 { 470 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); 471 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); 472 size_t outlen; 473 int num_addrs, rc; 474 475 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, 476 EVB_PORT_ID_ASSIGNED); 477 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, 478 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 479 480 if (rc) 481 return rc; 482 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) 483 return -EIO; 484 485 num_addrs = MCDI_DWORD(outbuf, 486 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); 487 488 WARN_ON(num_addrs != 1); 489 490 ether_addr_copy(mac_address, 491 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); 492 493 return 0; 494 } 495 496 static ssize_t efx_ef10_show_link_control_flag(struct device *dev, 
					       struct device_attribute *attr,
					       char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
		       ? 1 : 0);
}

static ssize_t efx_ef10_show_primary_flag(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

	return sprintf(buf, "%d\n",
		       ((efx->mcdi->fn_flags) &
			(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
		       ? 1 : 0);
}

static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	list_for_each_entry(vlan, &nic_data->vlan_list, list) {
		if (vlan->vid == vid)
			return vlan;
	}

	return NULL;
}

static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (vlan) {
		/* We add VID 0 on init. 8021q adds it on module init
		 * for all interfaces with the VLAN filtering feature.
		 */
		if (vid == 0)
			goto done_unlock;
		netif_warn(efx, drv, efx->net_dev,
			   "VLAN %u already added\n", vid);
		rc = -EALREADY;
		goto fail_exist;
	}

	rc = -ENOMEM;
	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		goto fail_alloc;

	vlan->vid = vid;

	list_add_tail(&vlan->list, &nic_data->vlan_list);

	if (efx->filter_state) {
		mutex_lock(&efx->mac_lock);
		down_write(&efx->filter_sem);
		rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
		mutex_unlock(&efx->mac_lock);
		if (rc)
			goto fail_filter_add_vlan;
	}

done_unlock:
	mutex_unlock(&nic_data->vlan_lock);
	return 0;

fail_filter_add_vlan:
	list_del(&vlan->list);
	kfree(vlan);
fail_alloc:
fail_exist:
	mutex_unlock(&nic_data->vlan_lock);
	return rc;
}

static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
				       struct efx_ef10_vlan *vlan)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));

	if (efx->filter_state) {
		down_write(&efx->filter_sem);
		efx_ef10_filter_del_vlan(efx, vlan->vid);
		up_write(&efx->filter_sem);
	}

	list_del(&vlan->list);
	kfree(vlan);
}

static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan;
	int rc = 0;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with the VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
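	 * (VID 0 is added unconditionally in efx_ef10_probe() via
	 * efx_ef10_add_vlan(efx, 0), as shown further down.)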
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&nic_data->vlan_lock);

	vlan = efx_ef10_find_vlan(efx, vid);
	if (!vlan) {
		netif_err(efx, drv, efx->net_dev,
			  "VLAN %u to be deleted not found\n", vid);
		rc = -ENOENT;
	} else {
		efx_ef10_del_vlan_internal(efx, vlan);
	}

	mutex_unlock(&nic_data->vlan_lock);

	return rc;
}

static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	struct efx_ef10_vlan *vlan, *next_vlan;

	mutex_lock(&nic_data->vlan_lock);
	list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
		efx_ef10_del_vlan_internal(efx, vlan);
	mutex_unlock(&nic_data->vlan_lock);
}

static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
		   NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	nic_data->vport_id = EVB_PORT_ID_ASSIGNED;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function. We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	mutex_init(&nic_data->udp_tunnels_lock);

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev,
				&dev_attr_link_control_flag);
	if (rc)
		goto fail3;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
	if (rc)
		goto fail4;

	rc = efx_ef10_get_pf_index(efx);
	if (rc)
		goto fail5;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail5;

	/* We can have one VI for each vi_stride-byte region.
	 * However, until we use TX option descriptors we need two TX queues
	 * per channel.
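	 * Hence max_channels below is the number of (vi_stride *
	 * EFX_TXQ_TYPES)-byte regions that fit in the memory BAR mapping,
	 * capped at EFX_MAX_CHANNELS.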
	 */
	efx->max_channels = min_t(unsigned int,
				  EFX_MAX_CHANNELS,
				  efx_ef10_mem_map_size(efx) /
				  (efx->vi_stride * EFX_TXQ_TYPES));
	efx->max_tx_channels = efx->max_channels;
	if (WARN_ON(efx->max_channels == 0)) {
		rc = -EIO;
		goto fail5;
	}

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	if (nic_data->datapath_caps &
	    (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN))
		efx->net_dev->hw_features |= NETIF_F_RXFCS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail5;
	efx->port_num = rc;

	rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail5;

	rc = efx_ef10_get_timer_config(efx);
	if (rc < 0)
		goto fail5;

	rc = efx_mcdi_mon_probe(efx);
	if (rc && rc != -EPERM)
		goto fail5;

	rc = efx_ptp_probe(efx, NULL);
	/* Failure to probe PTP is not fatal.
	 * In the case of EPERM, efx_ptp_probe will print its own message (in
	 * efx_ptp_get_attributes()), so we don't need to.
	 */
	if (rc && rc != -EPERM)
		netif_warn(efx, drv, efx->net_dev,
			   "Failed to probe PTP, rc=%d\n", rc);

#ifdef CONFIG_SFC_SRIOV
	if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
		struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
		struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);

		efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
	} else
#endif
		ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);

	INIT_LIST_HEAD(&nic_data->vlan_list);
	mutex_init(&nic_data->vlan_lock);

	/* Add unspecified VID to support VLAN filtering being disabled */
	rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc)
		goto fail_add_vid_unspec;

	/* If VLAN filtering is enabled, we need VID 0 to get untagged
	 * traffic. It is added automatically if the 8021q module is loaded,
	 * but we can't rely on it since the module may not be loaded.
793 */ 794 rc = efx_ef10_add_vlan(efx, 0); 795 if (rc) 796 goto fail_add_vid_0; 797 798 return 0; 799 800 fail_add_vid_0: 801 efx_ef10_cleanup_vlans(efx); 802 fail_add_vid_unspec: 803 mutex_destroy(&nic_data->vlan_lock); 804 efx_ptp_remove(efx); 805 efx_mcdi_mon_remove(efx); 806 fail5: 807 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 808 fail4: 809 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 810 fail3: 811 efx_mcdi_detach(efx); 812 813 mutex_lock(&nic_data->udp_tunnels_lock); 814 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 815 (void)efx_ef10_set_udp_tnl_ports(efx, true); 816 mutex_unlock(&nic_data->udp_tunnels_lock); 817 mutex_destroy(&nic_data->udp_tunnels_lock); 818 819 efx_mcdi_fini(efx); 820 fail2: 821 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 822 fail1: 823 kfree(nic_data); 824 efx->nic_data = NULL; 825 return rc; 826 } 827 828 static int efx_ef10_free_vis(struct efx_nic *efx) 829 { 830 MCDI_DECLARE_BUF_ERR(outbuf); 831 size_t outlen; 832 int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, 833 outbuf, sizeof(outbuf), &outlen); 834 835 /* -EALREADY means nothing to free, so ignore */ 836 if (rc == -EALREADY) 837 rc = 0; 838 if (rc) 839 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, 840 rc); 841 return rc; 842 } 843 844 #ifdef EFX_USE_PIO 845 846 static void efx_ef10_free_piobufs(struct efx_nic *efx) 847 { 848 struct efx_ef10_nic_data *nic_data = efx->nic_data; 849 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); 850 unsigned int i; 851 int rc; 852 853 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); 854 855 for (i = 0; i < nic_data->n_piobufs; i++) { 856 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, 857 nic_data->piobuf_handle[i]); 858 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), 859 NULL, 0, NULL); 860 WARN_ON(rc); 861 } 862 863 nic_data->n_piobufs = 0; 864 } 865 866 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 867 { 868 struct efx_ef10_nic_data *nic_data = efx->nic_data; 869 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); 870 unsigned int i; 871 size_t outlen; 872 int rc = 0; 873 874 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); 875 876 for (i = 0; i < n; i++) { 877 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, 878 outbuf, sizeof(outbuf), &outlen); 879 if (rc) { 880 /* Don't display the MC error if we didn't have space 881 * for a VF. 
882 */ 883 if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) 884 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, 885 0, outbuf, outlen, rc); 886 break; 887 } 888 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 889 rc = -EIO; 890 break; 891 } 892 nic_data->piobuf_handle[i] = 893 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 894 netif_dbg(efx, probe, efx->net_dev, 895 "allocated PIO buffer %u handle %x\n", i, 896 nic_data->piobuf_handle[i]); 897 } 898 899 nic_data->n_piobufs = i; 900 if (rc) 901 efx_ef10_free_piobufs(efx); 902 return rc; 903 } 904 905 static int efx_ef10_link_piobufs(struct efx_nic *efx) 906 { 907 struct efx_ef10_nic_data *nic_data = efx->nic_data; 908 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); 909 struct efx_channel *channel; 910 struct efx_tx_queue *tx_queue; 911 unsigned int offset, index; 912 int rc; 913 914 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 915 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 916 917 /* Link a buffer to each VI in the write-combining mapping */ 918 for (index = 0; index < nic_data->n_piobufs; ++index) { 919 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 920 nic_data->piobuf_handle[index]); 921 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, 922 nic_data->pio_write_vi_base + index); 923 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 924 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 925 NULL, 0, NULL); 926 if (rc) { 927 netif_err(efx, drv, efx->net_dev, 928 "failed to link VI %u to PIO buffer %u (%d)\n", 929 nic_data->pio_write_vi_base + index, index, 930 rc); 931 goto fail; 932 } 933 netif_dbg(efx, probe, efx->net_dev, 934 "linked VI %u to PIO buffer %u\n", 935 nic_data->pio_write_vi_base + index, index); 936 } 937 938 /* Link a buffer to each TX queue */ 939 efx_for_each_channel(channel, efx) { 940 efx_for_each_channel_tx_queue(tx_queue, channel) { 941 /* We assign the PIO buffers to queues in 942 * reverse order to allow for the following 943 * special case. 944 */ 945 offset = ((efx->tx_channel_offset + efx->n_tx_channels - 946 tx_queue->channel->channel - 1) * 947 efx_piobuf_size); 948 index = offset / nic_data->piobuf_size; 949 offset = offset % nic_data->piobuf_size; 950 951 /* When the host page size is 4K, the first 952 * host page in the WC mapping may be within 953 * the same VI page as the last TX queue. We 954 * can only link one buffer to each VI. 955 */ 956 if (tx_queue->queue == nic_data->pio_write_vi_base) { 957 BUG_ON(index != 0); 958 rc = 0; 959 } else { 960 MCDI_SET_DWORD(inbuf, 961 LINK_PIOBUF_IN_PIOBUF_HANDLE, 962 nic_data->piobuf_handle[index]); 963 MCDI_SET_DWORD(inbuf, 964 LINK_PIOBUF_IN_TXQ_INSTANCE, 965 tx_queue->queue); 966 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 967 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 968 NULL, 0, NULL); 969 } 970 971 if (rc) { 972 /* This is non-fatal; the TX path just 973 * won't use PIO for this queue 974 */ 975 netif_err(efx, drv, efx->net_dev, 976 "failed to link VI %u to PIO buffer %u (%d)\n", 977 tx_queue->queue, index, rc); 978 tx_queue->piobuf = NULL; 979 } else { 980 tx_queue->piobuf = 981 nic_data->pio_write_base + 982 index * efx->vi_stride + offset; 983 tx_queue->piobuf_offset = offset; 984 netif_dbg(efx, probe, efx->net_dev, 985 "linked VI %u to PIO buffer %u offset %x addr %p\n", 986 tx_queue->queue, index, 987 tx_queue->piobuf_offset, 988 tx_queue->piobuf); 989 } 990 } 991 } 992 993 return 0; 994 995 fail: 996 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same 997 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 
998 */ 999 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); 1000 while (index--) { 1001 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 1002 nic_data->pio_write_vi_base + index); 1003 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, 1004 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, 1005 NULL, 0, NULL); 1006 } 1007 return rc; 1008 } 1009 1010 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 1011 { 1012 struct efx_channel *channel; 1013 struct efx_tx_queue *tx_queue; 1014 1015 /* All our existing PIO buffers went away */ 1016 efx_for_each_channel(channel, efx) 1017 efx_for_each_channel_tx_queue(tx_queue, channel) 1018 tx_queue->piobuf = NULL; 1019 } 1020 1021 #else /* !EFX_USE_PIO */ 1022 1023 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 1024 { 1025 return n == 0 ? 0 : -ENOBUFS; 1026 } 1027 1028 static int efx_ef10_link_piobufs(struct efx_nic *efx) 1029 { 1030 return 0; 1031 } 1032 1033 static void efx_ef10_free_piobufs(struct efx_nic *efx) 1034 { 1035 } 1036 1037 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 1038 { 1039 } 1040 1041 #endif /* EFX_USE_PIO */ 1042 1043 static void efx_ef10_remove(struct efx_nic *efx) 1044 { 1045 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1046 int rc; 1047 1048 #ifdef CONFIG_SFC_SRIOV 1049 struct efx_ef10_nic_data *nic_data_pf; 1050 struct pci_dev *pci_dev_pf; 1051 struct efx_nic *efx_pf; 1052 struct ef10_vf *vf; 1053 1054 if (efx->pci_dev->is_virtfn) { 1055 pci_dev_pf = efx->pci_dev->physfn; 1056 if (pci_dev_pf) { 1057 efx_pf = pci_get_drvdata(pci_dev_pf); 1058 nic_data_pf = efx_pf->nic_data; 1059 vf = nic_data_pf->vf + nic_data->vf_index; 1060 vf->efx = NULL; 1061 } else 1062 netif_info(efx, drv, efx->net_dev, 1063 "Could not get the PF id from VF\n"); 1064 } 1065 #endif 1066 1067 efx_ef10_cleanup_vlans(efx); 1068 mutex_destroy(&nic_data->vlan_lock); 1069 1070 efx_ptp_remove(efx); 1071 1072 efx_mcdi_mon_remove(efx); 1073 1074 efx_ef10_rx_free_indir_table(efx); 1075 1076 if (nic_data->wc_membase) 1077 iounmap(nic_data->wc_membase); 1078 1079 rc = efx_ef10_free_vis(efx); 1080 WARN_ON(rc != 0); 1081 1082 if (!nic_data->must_restore_piobufs) 1083 efx_ef10_free_piobufs(efx); 1084 1085 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 1086 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 1087 1088 efx_mcdi_detach(efx); 1089 1090 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 1091 mutex_lock(&nic_data->udp_tunnels_lock); 1092 (void)efx_ef10_set_udp_tnl_ports(efx, true); 1093 mutex_unlock(&nic_data->udp_tunnels_lock); 1094 1095 mutex_destroy(&nic_data->udp_tunnels_lock); 1096 1097 efx_mcdi_fini(efx); 1098 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 1099 kfree(nic_data); 1100 } 1101 1102 static int efx_ef10_probe_pf(struct efx_nic *efx) 1103 { 1104 return efx_ef10_probe(efx); 1105 } 1106 1107 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, 1108 u32 *port_flags, u32 *vadaptor_flags, 1109 unsigned int *vlan_tags) 1110 { 1111 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1112 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); 1113 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); 1114 size_t outlen; 1115 int rc; 1116 1117 if (nic_data->datapath_caps & 1118 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { 1119 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, 1120 port_id); 1121 1122 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), 1123 outbuf, sizeof(outbuf), &outlen); 1124 
if (rc) 1125 return rc; 1126 1127 if (outlen < sizeof(outbuf)) { 1128 rc = -EIO; 1129 return rc; 1130 } 1131 } 1132 1133 if (port_flags) 1134 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); 1135 if (vadaptor_flags) 1136 *vadaptor_flags = 1137 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); 1138 if (vlan_tags) 1139 *vlan_tags = 1140 MCDI_DWORD(outbuf, 1141 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); 1142 1143 return 0; 1144 } 1145 1146 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 1147 { 1148 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 1149 1150 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 1151 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 1152 NULL, 0, NULL); 1153 } 1154 1155 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 1156 { 1157 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 1158 1159 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 1160 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 1161 NULL, 0, NULL); 1162 } 1163 1164 int efx_ef10_vport_add_mac(struct efx_nic *efx, 1165 unsigned int port_id, u8 *mac) 1166 { 1167 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 1168 1169 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 1170 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 1171 1172 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 1173 sizeof(inbuf), NULL, 0, NULL); 1174 } 1175 1176 int efx_ef10_vport_del_mac(struct efx_nic *efx, 1177 unsigned int port_id, u8 *mac) 1178 { 1179 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 1180 1181 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 1182 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 1183 1184 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 1185 sizeof(inbuf), NULL, 0, NULL); 1186 } 1187 1188 #ifdef CONFIG_SFC_SRIOV 1189 static int efx_ef10_probe_vf(struct efx_nic *efx) 1190 { 1191 int rc; 1192 struct pci_dev *pci_dev_pf; 1193 1194 /* If the parent PF has no VF data structure, it doesn't know about this 1195 * VF so fail probe. The VF needs to be re-created. This can happen 1196 * if the PF driver is unloaded while the VF is assigned to a guest. 
1197 */ 1198 pci_dev_pf = efx->pci_dev->physfn; 1199 if (pci_dev_pf) { 1200 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 1201 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; 1202 1203 if (!nic_data_pf->vf) { 1204 netif_info(efx, drv, efx->net_dev, 1205 "The VF cannot link to its parent PF; " 1206 "please destroy and re-create the VF\n"); 1207 return -EBUSY; 1208 } 1209 } 1210 1211 rc = efx_ef10_probe(efx); 1212 if (rc) 1213 return rc; 1214 1215 rc = efx_ef10_get_vf_index(efx); 1216 if (rc) 1217 goto fail; 1218 1219 if (efx->pci_dev->is_virtfn) { 1220 if (efx->pci_dev->physfn) { 1221 struct efx_nic *efx_pf = 1222 pci_get_drvdata(efx->pci_dev->physfn); 1223 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; 1224 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1225 1226 nic_data_p->vf[nic_data->vf_index].efx = efx; 1227 nic_data_p->vf[nic_data->vf_index].pci_dev = 1228 efx->pci_dev; 1229 } else 1230 netif_info(efx, drv, efx->net_dev, 1231 "Could not get the PF id from VF\n"); 1232 } 1233 1234 return 0; 1235 1236 fail: 1237 efx_ef10_remove(efx); 1238 return rc; 1239 } 1240 #else 1241 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) 1242 { 1243 return 0; 1244 } 1245 #endif 1246 1247 static int efx_ef10_alloc_vis(struct efx_nic *efx, 1248 unsigned int min_vis, unsigned int max_vis) 1249 { 1250 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); 1251 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); 1252 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1253 size_t outlen; 1254 int rc; 1255 1256 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); 1257 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); 1258 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), 1259 outbuf, sizeof(outbuf), &outlen); 1260 if (rc != 0) 1261 return rc; 1262 1263 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) 1264 return -EIO; 1265 1266 netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", 1267 MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE)); 1268 1269 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); 1270 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); 1271 return 0; 1272 } 1273 1274 /* Note that the failure path of this function does not free 1275 * resources, as this will be done by efx_ef10_remove(). 1276 */ 1277 static int efx_ef10_dimension_resources(struct efx_nic *efx) 1278 { 1279 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1280 unsigned int uc_mem_map_size, wc_mem_map_size; 1281 unsigned int min_vis = max(EFX_TXQ_TYPES, 1282 efx_separate_tx_channels ? 2 : 1); 1283 unsigned int channel_vis, pio_write_vi_base, max_vis; 1284 void __iomem *membase; 1285 int rc; 1286 1287 channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); 1288 1289 #ifdef EFX_USE_PIO 1290 /* Try to allocate PIO buffers if wanted and if the full 1291 * number of PIO buffers would be sufficient to allocate one 1292 * copy-buffer per TX channel. Failure is non-fatal, as there 1293 * are only a small number of PIO buffers shared between all 1294 * functions of the controller. 
1295 */ 1296 if (efx_piobuf_size != 0 && 1297 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= 1298 efx->n_tx_channels) { 1299 unsigned int n_piobufs = 1300 DIV_ROUND_UP(efx->n_tx_channels, 1301 nic_data->piobuf_size / efx_piobuf_size); 1302 1303 rc = efx_ef10_alloc_piobufs(efx, n_piobufs); 1304 if (rc == -ENOSPC) 1305 netif_dbg(efx, probe, efx->net_dev, 1306 "out of PIO buffers; cannot allocate more\n"); 1307 else if (rc == -EPERM) 1308 netif_dbg(efx, probe, efx->net_dev, 1309 "not permitted to allocate PIO buffers\n"); 1310 else if (rc) 1311 netif_err(efx, probe, efx->net_dev, 1312 "failed to allocate PIO buffers (%d)\n", rc); 1313 else 1314 netif_dbg(efx, probe, efx->net_dev, 1315 "allocated %u PIO buffers\n", n_piobufs); 1316 } 1317 #else 1318 nic_data->n_piobufs = 0; 1319 #endif 1320 1321 /* PIO buffers should be mapped with write-combining enabled, 1322 * and we want to make single UC and WC mappings rather than 1323 * several of each (in fact that's the only option if host 1324 * page size is >4K). So we may allocate some extra VIs just 1325 * for writing PIO buffers through. 1326 * 1327 * The UC mapping contains (channel_vis - 1) complete VIs and the 1328 * first 4K of the next VI. Then the WC mapping begins with 1329 * the remainder of this last VI. 1330 */ 1331 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + 1332 ER_DZ_TX_PIOBUF); 1333 if (nic_data->n_piobufs) { 1334 /* pio_write_vi_base rounds down to give the number of complete 1335 * VIs inside the UC mapping. 1336 */ 1337 pio_write_vi_base = uc_mem_map_size / efx->vi_stride; 1338 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 1339 nic_data->n_piobufs) * 1340 efx->vi_stride) - 1341 uc_mem_map_size); 1342 max_vis = pio_write_vi_base + nic_data->n_piobufs; 1343 } else { 1344 pio_write_vi_base = 0; 1345 wc_mem_map_size = 0; 1346 max_vis = channel_vis; 1347 } 1348 1349 /* In case the last attached driver failed to free VIs, do it now */ 1350 rc = efx_ef10_free_vis(efx); 1351 if (rc != 0) 1352 return rc; 1353 1354 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); 1355 if (rc != 0) 1356 return rc; 1357 1358 if (nic_data->n_allocated_vis < channel_vis) { 1359 netif_info(efx, drv, efx->net_dev, 1360 "Could not allocate enough VIs to satisfy RSS" 1361 " requirements. Performance may not be optimal.\n"); 1362 /* We didn't get the VIs to populate our channels. 1363 * We could keep what we got but then we'd have more 1364 * interrupts than we need. 
1365 * Instead calculate new max_channels and restart 1366 */ 1367 efx->max_channels = nic_data->n_allocated_vis; 1368 efx->max_tx_channels = 1369 nic_data->n_allocated_vis / EFX_TXQ_TYPES; 1370 1371 efx_ef10_free_vis(efx); 1372 return -EAGAIN; 1373 } 1374 1375 /* If we didn't get enough VIs to map all the PIO buffers, free the 1376 * PIO buffers 1377 */ 1378 if (nic_data->n_piobufs && 1379 nic_data->n_allocated_vis < 1380 pio_write_vi_base + nic_data->n_piobufs) { 1381 netif_dbg(efx, probe, efx->net_dev, 1382 "%u VIs are not sufficient to map %u PIO buffers\n", 1383 nic_data->n_allocated_vis, nic_data->n_piobufs); 1384 efx_ef10_free_piobufs(efx); 1385 } 1386 1387 /* Shrink the original UC mapping of the memory BAR */ 1388 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); 1389 if (!membase) { 1390 netif_err(efx, probe, efx->net_dev, 1391 "could not shrink memory BAR to %x\n", 1392 uc_mem_map_size); 1393 return -ENOMEM; 1394 } 1395 iounmap(efx->membase); 1396 efx->membase = membase; 1397 1398 /* Set up the WC mapping if needed */ 1399 if (wc_mem_map_size) { 1400 nic_data->wc_membase = ioremap_wc(efx->membase_phys + 1401 uc_mem_map_size, 1402 wc_mem_map_size); 1403 if (!nic_data->wc_membase) { 1404 netif_err(efx, probe, efx->net_dev, 1405 "could not allocate WC mapping of size %x\n", 1406 wc_mem_map_size); 1407 return -ENOMEM; 1408 } 1409 nic_data->pio_write_vi_base = pio_write_vi_base; 1410 nic_data->pio_write_base = 1411 nic_data->wc_membase + 1412 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - 1413 uc_mem_map_size); 1414 1415 rc = efx_ef10_link_piobufs(efx); 1416 if (rc) 1417 efx_ef10_free_piobufs(efx); 1418 } 1419 1420 netif_dbg(efx, probe, efx->net_dev, 1421 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", 1422 &efx->membase_phys, efx->membase, uc_mem_map_size, 1423 nic_data->wc_membase, wc_mem_map_size); 1424 1425 return 0; 1426 } 1427 1428 static int efx_ef10_init_nic(struct efx_nic *efx) 1429 { 1430 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1431 int rc; 1432 1433 if (nic_data->must_check_datapath_caps) { 1434 rc = efx_ef10_init_datapath_caps(efx); 1435 if (rc) 1436 return rc; 1437 nic_data->must_check_datapath_caps = false; 1438 } 1439 1440 if (nic_data->must_realloc_vis) { 1441 /* We cannot let the number of VIs change now */ 1442 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 1443 nic_data->n_allocated_vis); 1444 if (rc) 1445 return rc; 1446 nic_data->must_realloc_vis = false; 1447 } 1448 1449 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { 1450 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); 1451 if (rc == 0) { 1452 rc = efx_ef10_link_piobufs(efx); 1453 if (rc) 1454 efx_ef10_free_piobufs(efx); 1455 } 1456 1457 /* Log an error on failure, but this is non-fatal. 1458 * Permission errors are less important - we've presumably 1459 * had the PIO buffer licence removed. 
1460 */ 1461 if (rc == -EPERM) 1462 netif_dbg(efx, drv, efx->net_dev, 1463 "not permitted to restore PIO buffers\n"); 1464 else if (rc) 1465 netif_err(efx, drv, efx->net_dev, 1466 "failed to restore PIO buffers (%d)\n", rc); 1467 nic_data->must_restore_piobufs = false; 1468 } 1469 1470 /* don't fail init if RSS setup doesn't work */ 1471 rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); 1472 efx->rss_active = (rc == 0); 1473 1474 return 0; 1475 } 1476 1477 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) 1478 { 1479 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1480 #ifdef CONFIG_SFC_SRIOV 1481 unsigned int i; 1482 #endif 1483 1484 /* All our allocations have been reset */ 1485 nic_data->must_realloc_vis = true; 1486 nic_data->must_restore_filters = true; 1487 nic_data->must_restore_piobufs = true; 1488 efx_ef10_forget_old_piobufs(efx); 1489 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 1490 1491 /* Driver-created vswitches and vports must be re-created */ 1492 nic_data->must_probe_vswitching = true; 1493 nic_data->vport_id = EVB_PORT_ID_ASSIGNED; 1494 #ifdef CONFIG_SFC_SRIOV 1495 if (nic_data->vf) 1496 for (i = 0; i < efx->vf_count; i++) 1497 nic_data->vf[i].vport_id = 0; 1498 #endif 1499 } 1500 1501 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) 1502 { 1503 if (reason == RESET_TYPE_MC_FAILURE) 1504 return RESET_TYPE_DATAPATH; 1505 1506 return efx_mcdi_map_reset_reason(reason); 1507 } 1508 1509 static int efx_ef10_map_reset_flags(u32 *flags) 1510 { 1511 enum { 1512 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << 1513 ETH_RESET_SHARED_SHIFT), 1514 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 1515 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 1516 ETH_RESET_PHY | ETH_RESET_MGMT) << 1517 ETH_RESET_SHARED_SHIFT) 1518 }; 1519 1520 /* We assume for now that our PCI function is permitted to 1521 * reset everything. 1522 */ 1523 1524 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { 1525 *flags &= ~EF10_RESET_MC; 1526 return RESET_TYPE_WORLD; 1527 } 1528 1529 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { 1530 *flags &= ~EF10_RESET_PORT; 1531 return RESET_TYPE_ALL; 1532 } 1533 1534 /* no invisible reset implemented */ 1535 1536 return -EINVAL; 1537 } 1538 1539 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) 1540 { 1541 int rc = efx_mcdi_reset(efx, reset_type); 1542 1543 /* Unprivileged functions return -EPERM, but need to return success 1544 * here so that the datapath is brought back up. 1545 */ 1546 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) 1547 rc = 0; 1548 1549 /* If it was a port reset, trigger reallocation of MC resources. 1550 * Note that on an MC reset nothing needs to be done now because we'll 1551 * detect the MC reset later and handle it then. 1552 * For an FLR, we never get an MC reset event, but the MC has reset all 1553 * resources assigned to us, so we have to trigger reallocation now. 
1554 */ 1555 if ((reset_type == RESET_TYPE_ALL || 1556 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) 1557 efx_ef10_reset_mc_allocations(efx); 1558 return rc; 1559 } 1560 1561 #define EF10_DMA_STAT(ext_name, mcdi_name) \ 1562 [EF10_STAT_ ## ext_name] = \ 1563 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1564 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ 1565 [EF10_STAT_ ## int_name] = \ 1566 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1567 #define EF10_OTHER_STAT(ext_name) \ 1568 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1569 #define GENERIC_SW_STAT(ext_name) \ 1570 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1571 1572 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { 1573 EF10_DMA_STAT(port_tx_bytes, TX_BYTES), 1574 EF10_DMA_STAT(port_tx_packets, TX_PKTS), 1575 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), 1576 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), 1577 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), 1578 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), 1579 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), 1580 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), 1581 EF10_DMA_STAT(port_tx_64, TX_64_PKTS), 1582 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), 1583 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), 1584 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), 1585 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), 1586 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), 1587 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), 1588 EF10_DMA_STAT(port_rx_bytes, RX_BYTES), 1589 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), 1590 EF10_OTHER_STAT(port_rx_good_bytes), 1591 EF10_OTHER_STAT(port_rx_bad_bytes), 1592 EF10_DMA_STAT(port_rx_packets, RX_PKTS), 1593 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), 1594 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), 1595 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), 1596 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), 1597 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), 1598 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), 1599 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), 1600 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), 1601 EF10_DMA_STAT(port_rx_64, RX_64_PKTS), 1602 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), 1603 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), 1604 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), 1605 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), 1606 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), 1607 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), 1608 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), 1609 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), 1610 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), 1611 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), 1612 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), 1613 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), 1614 GENERIC_SW_STAT(rx_nodesc_trunc), 1615 GENERIC_SW_STAT(rx_noskb_drops), 1616 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 1617 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 1618 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 1619 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 1620 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), 1621 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), 1622 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), 1623 
EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 1624 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 1625 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 1626 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), 1627 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), 1628 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), 1629 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), 1630 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), 1631 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), 1632 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), 1633 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), 1634 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), 1635 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), 1636 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), 1637 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), 1638 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), 1639 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), 1640 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), 1641 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), 1642 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), 1643 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), 1644 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), 1645 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), 1646 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), 1647 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), 1648 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), 1649 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1650 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1651 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1652 EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START), 1653 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1654 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1655 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), 1656 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), 1657 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), 1658 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), 1659 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), 1660 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), 1661 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), 1662 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), 1663 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), 1664 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), 1665 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), 1666 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), 1667 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), 1668 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), 1669 }; 1670 1671 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ 1672 (1ULL << EF10_STAT_port_tx_packets) | \ 1673 (1ULL << EF10_STAT_port_tx_pause) | \ 1674 (1ULL << EF10_STAT_port_tx_unicast) | \ 1675 (1ULL << EF10_STAT_port_tx_multicast) | \ 1676 (1ULL << EF10_STAT_port_tx_broadcast) | \ 1677 (1ULL << EF10_STAT_port_rx_bytes) | \ 1678 (1ULL << \ 1679 EF10_STAT_port_rx_bytes_minus_good_bytes) | \ 1680 (1ULL << EF10_STAT_port_rx_good_bytes) | \ 1681 (1ULL << EF10_STAT_port_rx_bad_bytes) | \ 1682 (1ULL << EF10_STAT_port_rx_packets) | \ 1683 (1ULL 
<< EF10_STAT_port_rx_good) | \ 1684 (1ULL << EF10_STAT_port_rx_bad) | \ 1685 (1ULL << EF10_STAT_port_rx_pause) | \ 1686 (1ULL << EF10_STAT_port_rx_control) | \ 1687 (1ULL << EF10_STAT_port_rx_unicast) | \ 1688 (1ULL << EF10_STAT_port_rx_multicast) | \ 1689 (1ULL << EF10_STAT_port_rx_broadcast) | \ 1690 (1ULL << EF10_STAT_port_rx_lt64) | \ 1691 (1ULL << EF10_STAT_port_rx_64) | \ 1692 (1ULL << EF10_STAT_port_rx_65_to_127) | \ 1693 (1ULL << EF10_STAT_port_rx_128_to_255) | \ 1694 (1ULL << EF10_STAT_port_rx_256_to_511) | \ 1695 (1ULL << EF10_STAT_port_rx_512_to_1023) |\ 1696 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ 1697 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ 1698 (1ULL << EF10_STAT_port_rx_gtjumbo) | \ 1699 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ 1700 (1ULL << EF10_STAT_port_rx_overflow) | \ 1701 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ 1702 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ 1703 (1ULL << GENERIC_STAT_rx_noskb_drops)) 1704 1705 /* On 7000 series NICs, these statistics are only provided by the 10G MAC. 1706 * For a 10G/40G switchable port we do not expose these because they might 1707 * not include all the packets they should. 1708 * On 8000 series NICs these statistics are always provided. 1709 */ 1710 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ 1711 (1ULL << EF10_STAT_port_tx_lt64) | \ 1712 (1ULL << EF10_STAT_port_tx_64) | \ 1713 (1ULL << EF10_STAT_port_tx_65_to_127) |\ 1714 (1ULL << EF10_STAT_port_tx_128_to_255) |\ 1715 (1ULL << EF10_STAT_port_tx_256_to_511) |\ 1716 (1ULL << EF10_STAT_port_tx_512_to_1023) |\ 1717 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ 1718 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) 1719 1720 /* These statistics are only provided by the 40G MAC. For a 10G/40G 1721 * switchable port we do expose these because the errors will otherwise 1722 * be silent. 1723 */ 1724 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ 1725 (1ULL << EF10_STAT_port_rx_length_error)) 1726 1727 /* These statistics are only provided if the firmware supports the 1728 * capability PM_AND_RXDP_COUNTERS. 1729 */ 1730 #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 1731 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ 1732 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ 1733 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ 1734 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ 1735 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ 1736 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ 1737 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ 1738 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ 1739 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ 1740 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ 1741 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ 1742 (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) 1743 1744 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, 1745 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in 1746 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1747 * These bits are in the second u64 of the raw mask. 
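 * (hence the "- 64" in each bit position below: stat indices 64 and above
 * map to bits 0..63 of raw_mask[1] in efx_ef10_get_stat_mask()).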
1748 */ 1749 #define EF10_FEC_STAT_MASK ( \ 1750 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ 1751 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ 1752 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ 1753 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ 1754 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ 1755 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) 1756 1757 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, 1758 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in 1759 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1760 * These bits are in the second u64 of the raw mask. 1761 */ 1762 #define EF10_CTPIO_STAT_MASK ( \ 1763 (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \ 1764 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1765 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1766 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ 1767 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ 1768 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ 1769 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ 1770 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ 1771 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ 1772 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ 1773 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ 1774 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ 1775 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ 1776 (1ULL << (EF10_STAT_ctpio_success - 64)) | \ 1777 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ 1778 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ 1779 (1ULL << (EF10_STAT_ctpio_erase - 64))) 1780 1781 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1782 { 1783 u64 raw_mask = HUNT_COMMON_STAT_MASK; 1784 u32 port_caps = efx_mcdi_phy_get_caps(efx); 1785 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1786 1787 if (!(efx->mcdi->fn_flags & 1788 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 1789 return 0; 1790 1791 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { 1792 raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 1793 /* 8000 series have everything even at 40G */ 1794 if (nic_data->datapath_caps2 & 1795 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) 1796 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1797 } else { 1798 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1799 } 1800 1801 if (nic_data->datapath_caps & 1802 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 1803 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 1804 1805 return raw_mask; 1806 } 1807 1808 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 1809 { 1810 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1811 u64 raw_mask[2]; 1812 1813 raw_mask[0] = efx_ef10_raw_stat_mask(efx); 1814 1815 /* Only show vadaptor stats when EVB capability is present */ 1816 if (nic_data->datapath_caps & 1817 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { 1818 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); 1819 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; 1820 } else { 1821 raw_mask[1] = 0; 1822 } 1823 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ 1824 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) 1825 raw_mask[1] |= EF10_FEC_STAT_MASK; 1826 1827 /* CTPIO stats appear in V3. Only show them on devices that actually 1828 * support CTPIO. Although this driver doesn't use CTPIO others might, 1829 * and we may be reporting the stats for the underlying port. 
1830 */ 1831 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && 1832 (nic_data->datapath_caps2 & 1833 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) 1834 raw_mask[1] |= EF10_CTPIO_STAT_MASK; 1835 1836 #if BITS_PER_LONG == 64 1837 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); 1838 mask[0] = raw_mask[0]; 1839 mask[1] = raw_mask[1]; 1840 #else 1841 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 1842 mask[0] = raw_mask[0] & 0xffffffff; 1843 mask[1] = raw_mask[0] >> 32; 1844 mask[2] = raw_mask[1] & 0xffffffff; 1845 #endif 1846 } 1847 1848 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 1849 { 1850 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1851 1852 efx_ef10_get_stat_mask(efx, mask); 1853 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1854 mask, names); 1855 } 1856 1857 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, 1858 struct rtnl_link_stats64 *core_stats) 1859 { 1860 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1861 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1862 u64 *stats = nic_data->stats; 1863 size_t stats_count = 0, index; 1864 1865 efx_ef10_get_stat_mask(efx, mask); 1866 1867 if (full_stats) { 1868 for_each_set_bit(index, mask, EF10_STAT_COUNT) { 1869 if (efx_ef10_stat_desc[index].name) { 1870 *full_stats++ = stats[index]; 1871 ++stats_count; 1872 } 1873 } 1874 } 1875 1876 if (!core_stats) 1877 return stats_count; 1878 1879 if (nic_data->datapath_caps & 1880 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { 1881 /* Use vadaptor stats. */ 1882 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + 1883 stats[EF10_STAT_rx_multicast] + 1884 stats[EF10_STAT_rx_broadcast]; 1885 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + 1886 stats[EF10_STAT_tx_multicast] + 1887 stats[EF10_STAT_tx_broadcast]; 1888 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + 1889 stats[EF10_STAT_rx_multicast_bytes] + 1890 stats[EF10_STAT_rx_broadcast_bytes]; 1891 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + 1892 stats[EF10_STAT_tx_multicast_bytes] + 1893 stats[EF10_STAT_tx_broadcast_bytes]; 1894 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + 1895 stats[GENERIC_STAT_rx_noskb_drops]; 1896 core_stats->multicast = stats[EF10_STAT_rx_multicast]; 1897 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; 1898 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; 1899 core_stats->rx_errors = core_stats->rx_crc_errors; 1900 core_stats->tx_errors = stats[EF10_STAT_tx_bad]; 1901 } else { 1902 /* Use port stats. 
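 * Without the EVB capability the per-vadaptor counters used above are
 * not exposed, so fall back to the physical port MAC statistics.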
*/ 1903 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; 1904 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; 1905 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; 1906 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; 1907 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + 1908 stats[GENERIC_STAT_rx_nodesc_trunc] + 1909 stats[GENERIC_STAT_rx_noskb_drops]; 1910 core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; 1911 core_stats->rx_length_errors = 1912 stats[EF10_STAT_port_rx_gtjumbo] + 1913 stats[EF10_STAT_port_rx_length_error]; 1914 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; 1915 core_stats->rx_frame_errors = 1916 stats[EF10_STAT_port_rx_align_error]; 1917 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; 1918 core_stats->rx_errors = (core_stats->rx_length_errors + 1919 core_stats->rx_crc_errors + 1920 core_stats->rx_frame_errors); 1921 } 1922 1923 return stats_count; 1924 } 1925 1926 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) 1927 { 1928 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1929 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1930 __le64 generation_start, generation_end; 1931 u64 *stats = nic_data->stats; 1932 __le64 *dma_stats; 1933 1934 efx_ef10_get_stat_mask(efx, mask); 1935 1936 dma_stats = efx->stats_buffer.addr; 1937 1938 generation_end = dma_stats[efx->num_mac_stats - 1]; 1939 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 1940 return 0; 1941 rmb(); 1942 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 1943 stats, efx->stats_buffer.addr, false); 1944 rmb(); 1945 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 1946 if (generation_end != generation_start) 1947 return -EAGAIN; 1948 1949 /* Update derived statistics */ 1950 efx_nic_fix_nodesc_drop_stat(efx, 1951 &stats[EF10_STAT_port_rx_nodesc_drops]); 1952 stats[EF10_STAT_port_rx_good_bytes] = 1953 stats[EF10_STAT_port_rx_bytes] - 1954 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; 1955 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], 1956 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); 1957 efx_update_sw_stats(efx, stats); 1958 return 0; 1959 } 1960 1961 1962 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, 1963 struct rtnl_link_stats64 *core_stats) 1964 { 1965 int retry; 1966 1967 /* If we're unlucky enough to read statistics during the DMA, wait 1968 * up to 10ms for it to finish (typically takes <500us) 1969 */ 1970 for (retry = 0; retry < 100; ++retry) { 1971 if (efx_ef10_try_update_nic_stats_pf(efx) == 0) 1972 break; 1973 udelay(100); 1974 } 1975 1976 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1977 } 1978 1979 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) 1980 { 1981 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 1982 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1983 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1984 __le64 generation_start, generation_end; 1985 u64 *stats = nic_data->stats; 1986 u32 dma_len = efx->num_mac_stats * sizeof(u64); 1987 struct efx_buffer stats_buf; 1988 __le64 *dma_stats; 1989 int rc; 1990 1991 spin_unlock_bh(&efx->stats_lock); 1992 1993 if (in_interrupt()) { 1994 /* If in atomic context, cannot update stats. Just update the 1995 * software stats and return so the caller can continue. 
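 * (The MC_CMD_MAC_STATS request issued below is a synchronous MCDI
 * call and may sleep, so it cannot be made from atomic context.)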
1996 */ 1997 spin_lock_bh(&efx->stats_lock); 1998 efx_update_sw_stats(efx, stats); 1999 return 0; 2000 } 2001 2002 efx_ef10_get_stat_mask(efx, mask); 2003 2004 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); 2005 if (rc) { 2006 spin_lock_bh(&efx->stats_lock); 2007 return rc; 2008 } 2009 2010 dma_stats = stats_buf.addr; 2011 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; 2012 2013 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); 2014 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, 2015 MAC_STATS_IN_DMA, 1); 2016 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 2017 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); 2018 2019 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 2020 NULL, 0, NULL); 2021 spin_lock_bh(&efx->stats_lock); 2022 if (rc) { 2023 /* Expect ENOENT if DMA queues have not been set up */ 2024 if (rc != -ENOENT || atomic_read(&efx->active_queues)) 2025 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, 2026 sizeof(inbuf), NULL, 0, rc); 2027 goto out; 2028 } 2029 2030 generation_end = dma_stats[efx->num_mac_stats - 1]; 2031 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { 2032 WARN_ON_ONCE(1); 2033 goto out; 2034 } 2035 rmb(); 2036 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 2037 stats, stats_buf.addr, false); 2038 rmb(); 2039 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 2040 if (generation_end != generation_start) { 2041 rc = -EAGAIN; 2042 goto out; 2043 } 2044 2045 efx_update_sw_stats(efx, stats); 2046 out: 2047 efx_nic_free_buffer(efx, &stats_buf); 2048 return rc; 2049 } 2050 2051 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, 2052 struct rtnl_link_stats64 *core_stats) 2053 { 2054 if (efx_ef10_try_update_nic_stats_vf(efx)) 2055 return 0; 2056 2057 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 2058 } 2059 2060 static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 2061 { 2062 struct efx_nic *efx = channel->efx; 2063 unsigned int mode, usecs; 2064 efx_dword_t timer_cmd; 2065 2066 if (channel->irq_moderation_us) { 2067 mode = 3; 2068 usecs = channel->irq_moderation_us; 2069 } else { 2070 mode = 0; 2071 usecs = 0; 2072 } 2073 2074 if (EFX_EF10_WORKAROUND_61265(efx)) { 2075 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); 2076 unsigned int ns = usecs * 1000; 2077 2078 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, 2079 channel->channel); 2080 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); 2081 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); 2082 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); 2083 2084 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, 2085 inbuf, sizeof(inbuf), 0, NULL, 0); 2086 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 2087 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2088 2089 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, 2090 EFE_DD_EVQ_IND_TIMER_FLAGS, 2091 ERF_DD_EVQ_IND_TIMER_MODE, mode, 2092 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 2093 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, 2094 channel->channel); 2095 } else { 2096 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2097 2098 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, 2099 ERF_DZ_TC_TIMER_VAL, ticks, 2100 ERF_FZ_TC_TMR_REL_VAL, ticks); 2101 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, 2102 channel->channel); 2103 } 2104 } 2105 2106 static void efx_ef10_get_wol_vf(struct efx_nic *efx, 2107 struct ethtool_wolinfo *wol) {} 2108 2109 
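/* As with efx_ef10_get_wol_vf() above, VFs have no Wake-on-LAN support,
 * so any attempt to configure it is rejected.
 */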
static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) 2110 { 2111 return -EOPNOTSUPP; 2112 } 2113 2114 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) 2115 { 2116 wol->supported = 0; 2117 wol->wolopts = 0; 2118 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2119 } 2120 2121 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) 2122 { 2123 if (type != 0) 2124 return -EINVAL; 2125 return 0; 2126 } 2127 2128 static void efx_ef10_mcdi_request(struct efx_nic *efx, 2129 const efx_dword_t *hdr, size_t hdr_len, 2130 const efx_dword_t *sdu, size_t sdu_len) 2131 { 2132 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2133 u8 *pdu = nic_data->mcdi_buf.addr; 2134 2135 memcpy(pdu, hdr, hdr_len); 2136 memcpy(pdu + hdr_len, sdu, sdu_len); 2137 wmb(); 2138 2139 /* The hardware provides 'low' and 'high' (doorbell) registers 2140 * for passing the 64-bit address of an MCDI request to 2141 * firmware. However the dwords are swapped by firmware. The 2142 * least significant bits of the doorbell are then 0 for all 2143 * MCDI requests due to alignment. 2144 */ 2145 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), 2146 ER_DZ_MC_DB_LWRD); 2147 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), 2148 ER_DZ_MC_DB_HWRD); 2149 } 2150 2151 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) 2152 { 2153 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2154 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; 2155 2156 rmb(); 2157 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); 2158 } 2159 2160 static void 2161 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, 2162 size_t offset, size_t outlen) 2163 { 2164 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2165 const u8 *pdu = nic_data->mcdi_buf.addr; 2166 2167 memcpy(outbuf, pdu + offset, outlen); 2168 } 2169 2170 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) 2171 { 2172 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2173 2174 /* All our allocations have been reset */ 2175 efx_ef10_reset_mc_allocations(efx); 2176 2177 /* The datapath firmware might have been changed */ 2178 nic_data->must_check_datapath_caps = true; 2179 2180 /* MAC statistics have been cleared on the NIC; clear the local 2181 * statistic that we update with efx_update_diff_stat(). 2182 */ 2183 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; 2184 } 2185 2186 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) 2187 { 2188 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2189 int rc; 2190 2191 rc = efx_ef10_get_warm_boot_count(efx); 2192 if (rc < 0) { 2193 /* The firmware is presumably in the process of 2194 * rebooting. However, we are supposed to report each 2195 * reboot just once, so we must only do that once we 2196 * can read and store the updated warm boot count. 2197 */ 2198 return 0; 2199 } 2200 2201 if (rc == nic_data->warm_boot_count) 2202 return 0; 2203 2204 nic_data->warm_boot_count = rc; 2205 efx_ef10_mcdi_reboot_detected(efx); 2206 2207 return -EIO; 2208 } 2209 2210 /* Handle an MSI interrupt 2211 * 2212 * Handle an MSI hardware interrupt. This routine schedules event 2213 * queue processing. No interrupt acknowledgement cycle is necessary. 2214 * Also, we never need to check that the interrupt is for us, since 2215 * MSI interrupts cannot be shared. 
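 * The handler only marks the owning channel for processing via
 * efx_schedule_channel_irq(); the event queue itself is drained later
 * from NAPI context.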
2216 */ 2217 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) 2218 { 2219 struct efx_msi_context *context = dev_id; 2220 struct efx_nic *efx = context->efx; 2221 2222 netif_vdbg(efx, intr, efx->net_dev, 2223 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); 2224 2225 if (likely(READ_ONCE(efx->irq_soft_enabled))) { 2226 /* Note test interrupts */ 2227 if (context->index == efx->irq_level) 2228 efx->last_irq_cpu = raw_smp_processor_id(); 2229 2230 /* Schedule processing of the channel */ 2231 efx_schedule_channel_irq(efx->channel[context->index]); 2232 } 2233 2234 return IRQ_HANDLED; 2235 } 2236 2237 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) 2238 { 2239 struct efx_nic *efx = dev_id; 2240 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 2241 struct efx_channel *channel; 2242 efx_dword_t reg; 2243 u32 queues; 2244 2245 /* Read the ISR which also ACKs the interrupts */ 2246 efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); 2247 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); 2248 2249 if (queues == 0) 2250 return IRQ_NONE; 2251 2252 if (likely(soft_enabled)) { 2253 /* Note test interrupts */ 2254 if (queues & (1U << efx->irq_level)) 2255 efx->last_irq_cpu = raw_smp_processor_id(); 2256 2257 efx_for_each_channel(channel, efx) { 2258 if (queues & 1) 2259 efx_schedule_channel_irq(channel); 2260 queues >>= 1; 2261 } 2262 } 2263 2264 netif_vdbg(efx, intr, efx->net_dev, 2265 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 2266 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 2267 2268 return IRQ_HANDLED; 2269 } 2270 2271 static int efx_ef10_irq_test_generate(struct efx_nic *efx) 2272 { 2273 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); 2274 2275 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, 2276 NULL) == 0) 2277 return -ENOTSUPP; 2278 2279 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); 2280 2281 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); 2282 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, 2283 inbuf, sizeof(inbuf), NULL, 0, NULL); 2284 } 2285 2286 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) 2287 { 2288 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2289 (tx_queue->ptr_mask + 1) * 2290 sizeof(efx_qword_t), 2291 GFP_KERNEL); 2292 } 2293 2294 /* This writes to the TX_DESC_WPTR and also pushes data */ 2295 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, 2296 const efx_qword_t *txd) 2297 { 2298 unsigned int write_ptr; 2299 efx_oword_t reg; 2300 2301 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2302 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); 2303 reg.qword[0] = *txd; 2304 efx_writeo_page(tx_queue->efx, ®, 2305 ER_DZ_TX_DESC_UPD, tx_queue->queue); 2306 } 2307 2308 /* Add Firmware-Assisted TSO v2 option descriptors to a queue. 2309 */ 2310 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, 2311 struct sk_buff *skb, 2312 bool *data_mapped) 2313 { 2314 struct efx_tx_buffer *buffer; 2315 struct tcphdr *tcp; 2316 struct iphdr *ip; 2317 2318 u16 ipv4_id; 2319 u32 seqnum; 2320 u32 mss; 2321 2322 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); 2323 2324 mss = skb_shinfo(skb)->gso_size; 2325 2326 if (unlikely(mss < 4)) { 2327 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); 2328 return -EINVAL; 2329 } 2330 2331 ip = ip_hdr(skb); 2332 if (ip->version == 4) { 2333 /* Modify IPv4 header if needed. 
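 * For TSO v2 the hardware rewrites the length and checksum fields in
 * each segment, so they are cleared here; the starting IP ID is kept
 * for the first option descriptor below.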
*/ 2334 ip->tot_len = 0; 2335 ip->check = 0; 2336 ipv4_id = ntohs(ip->id); 2337 } else { 2338 /* Modify IPv6 header if needed. */ 2339 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2340 2341 ipv6->payload_len = 0; 2342 ipv4_id = 0; 2343 } 2344 2345 tcp = tcp_hdr(skb); 2346 seqnum = ntohl(tcp->seq); 2347 2348 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2349 2350 buffer->flags = EFX_TX_BUF_OPTION; 2351 buffer->len = 0; 2352 buffer->unmap_len = 0; 2353 EFX_POPULATE_QWORD_5(buffer->option, 2354 ESF_DZ_TX_DESC_IS_OPT, 1, 2355 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2356 ESF_DZ_TX_TSO_OPTION_TYPE, 2357 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, 2358 ESF_DZ_TX_TSO_IP_ID, ipv4_id, 2359 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum 2360 ); 2361 ++tx_queue->insert_count; 2362 2363 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2364 2365 buffer->flags = EFX_TX_BUF_OPTION; 2366 buffer->len = 0; 2367 buffer->unmap_len = 0; 2368 EFX_POPULATE_QWORD_4(buffer->option, 2369 ESF_DZ_TX_DESC_IS_OPT, 1, 2370 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2371 ESF_DZ_TX_TSO_OPTION_TYPE, 2372 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, 2373 ESF_DZ_TX_TSO_TCP_MSS, mss 2374 ); 2375 ++tx_queue->insert_count; 2376 2377 return 0; 2378 } 2379 2380 static u32 efx_ef10_tso_versions(struct efx_nic *efx) 2381 { 2382 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2383 u32 tso_versions = 0; 2384 2385 if (nic_data->datapath_caps & 2386 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) 2387 tso_versions |= BIT(1); 2388 if (nic_data->datapath_caps2 & 2389 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) 2390 tso_versions |= BIT(2); 2391 return tso_versions; 2392 } 2393 2394 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) 2395 { 2396 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 2397 EFX_BUF_SIZE)); 2398 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 2399 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; 2400 struct efx_channel *channel = tx_queue->channel; 2401 struct efx_nic *efx = tx_queue->efx; 2402 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2403 bool tso_v2 = false; 2404 size_t inlen; 2405 dma_addr_t dma_addr; 2406 efx_qword_t *txd; 2407 int rc; 2408 int i; 2409 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); 2410 2411 /* TSOv2 is a limited resource that can only be configured on a limited 2412 * number of queues. TSO without checksum offload is not really a thing, 2413 * so we only enable it for those queues. 2414 */ 2415 if (csum_offload && (nic_data->datapath_caps2 & 2416 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN))) { 2417 tso_v2 = true; 2418 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", 2419 channel->channel); 2420 } 2421 2422 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); 2423 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); 2424 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); 2425 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); 2426 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); 2427 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); 2428 2429 dma_addr = tx_queue->txd.buf.dma_addr; 2430 2431 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. 
%zu entries (%llx)\n", 2432 tx_queue->queue, entries, (u64)dma_addr); 2433 2434 for (i = 0; i < entries; ++i) { 2435 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); 2436 dma_addr += EFX_BUF_SIZE; 2437 } 2438 2439 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); 2440 2441 do { 2442 MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_IN_FLAGS, 2443 /* This flag was removed from mcdi_pcol.h for 2444 * the non-_EXT version of INIT_TXQ. However, 2445 * firmware still honours it. 2446 */ 2447 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, 2448 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, 2449 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload); 2450 2451 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, 2452 NULL, 0, NULL); 2453 if (rc == -ENOSPC && tso_v2) { 2454 /* Retry without TSOv2 if we're short on contexts. */ 2455 tso_v2 = false; 2456 netif_warn(efx, probe, efx->net_dev, 2457 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n"); 2458 } else if (rc) { 2459 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, 2460 MC_CMD_INIT_TXQ_EXT_IN_LEN, 2461 NULL, 0, rc); 2462 goto fail; 2463 } 2464 } while (rc); 2465 2466 /* A previous user of this TX queue might have set us up the 2467 * bomb by writing a descriptor to the TX push collector but 2468 * not the doorbell. (Each collector belongs to a port, not a 2469 * queue or function, so cannot easily be reset.) We must 2470 * attempt to push a no-op descriptor in its place. 2471 */ 2472 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; 2473 tx_queue->insert_count = 1; 2474 txd = efx_tx_desc(tx_queue, 0); 2475 EFX_POPULATE_QWORD_4(*txd, 2476 ESF_DZ_TX_DESC_IS_OPT, true, 2477 ESF_DZ_TX_OPTION_TYPE, 2478 ESE_DZ_TX_OPTION_DESC_CRC_CSUM, 2479 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, 2480 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload); 2481 tx_queue->write_count = 1; 2482 2483 if (tso_v2) { 2484 tx_queue->handle_tso = efx_ef10_tx_tso_desc; 2485 tx_queue->tso_version = 2; 2486 } else if (nic_data->datapath_caps & 2487 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { 2488 tx_queue->tso_version = 1; 2489 } 2490 2491 wmb(); 2492 efx_ef10_push_tx_desc(tx_queue, txd); 2493 2494 return; 2495 2496 fail: 2497 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", 2498 tx_queue->queue); 2499 } 2500 2501 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) 2502 { 2503 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); 2504 MCDI_DECLARE_BUF_ERR(outbuf); 2505 struct efx_nic *efx = tx_queue->efx; 2506 size_t outlen; 2507 int rc; 2508 2509 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, 2510 tx_queue->queue); 2511 2512 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), 2513 outbuf, sizeof(outbuf), &outlen); 2514 2515 if (rc && rc != -EALREADY) 2516 goto fail; 2517 2518 return; 2519 2520 fail: 2521 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, 2522 outbuf, outlen, rc); 2523 } 2524 2525 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) 2526 { 2527 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); 2528 } 2529 2530 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 2531 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) 2532 { 2533 unsigned int write_ptr; 2534 efx_dword_t reg; 2535 2536 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2537 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); 2538 efx_writed_page(tx_queue->efx, ®, 2539 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); 2540 } 2541 2542 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 
0x3fff 2543 2544 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, 2545 dma_addr_t dma_addr, unsigned int len) 2546 { 2547 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { 2548 /* If we need to break across multiple descriptors we should 2549 * stop at a page boundary. This assumes the length limit is 2550 * greater than the page size. 2551 */ 2552 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; 2553 2554 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); 2555 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; 2556 } 2557 2558 return len; 2559 } 2560 2561 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) 2562 { 2563 unsigned int old_write_count = tx_queue->write_count; 2564 struct efx_tx_buffer *buffer; 2565 unsigned int write_ptr; 2566 efx_qword_t *txd; 2567 2568 tx_queue->xmit_more_available = false; 2569 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) 2570 return; 2571 2572 do { 2573 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2574 buffer = &tx_queue->buffer[write_ptr]; 2575 txd = efx_tx_desc(tx_queue, write_ptr); 2576 ++tx_queue->write_count; 2577 2578 /* Create TX descriptor ring entry */ 2579 if (buffer->flags & EFX_TX_BUF_OPTION) { 2580 *txd = buffer->option; 2581 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) 2582 /* PIO descriptor */ 2583 tx_queue->packet_write_count = tx_queue->write_count; 2584 } else { 2585 tx_queue->packet_write_count = tx_queue->write_count; 2586 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 2587 EFX_POPULATE_QWORD_3( 2588 *txd, 2589 ESF_DZ_TX_KER_CONT, 2590 buffer->flags & EFX_TX_BUF_CONT, 2591 ESF_DZ_TX_KER_BYTE_CNT, buffer->len, 2592 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); 2593 } 2594 } while (tx_queue->write_count != tx_queue->insert_count); 2595 2596 wmb(); /* Ensure descriptors are written before they are fetched */ 2597 2598 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { 2599 txd = efx_tx_desc(tx_queue, 2600 old_write_count & tx_queue->ptr_mask); 2601 efx_ef10_push_tx_desc(tx_queue, txd); 2602 ++tx_queue->pushes; 2603 } else { 2604 efx_ef10_notify_tx_desc(tx_queue); 2605 } 2606 } 2607 2608 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ 2609 1 << RSS_MODE_HASH_DST_ADDR_LBN) 2610 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ 2611 1 << RSS_MODE_HASH_DST_PORT_LBN) 2612 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ 2613 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ 2614 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ 2615 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\ 2616 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ 2617 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ 2618 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ 2619 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ 2620 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ 2621 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) 2622 2623 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) 2624 { 2625 /* Firmware had a bug (sfc bug 61952) where it would not actually 2626 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. 
2627 * This meant that it would always contain whatever was previously 2628 * in the MCDI buffer. Fortunately, all firmware versions with 2629 * this bug have the same default flags value for a newly-allocated 2630 * RSS context, and the only time we want to get the flags is just 2631 * after allocating. Moreover, the response has a 32-bit hole 2632 * where the context ID would be in the request, so we can use an 2633 * overlength buffer in the request and pre-fill the flags field 2634 * with what we believe the default to be. Thus if the firmware 2635 * has the bug, it will leave our pre-filled value in the flags 2636 * field of the response, and we will get the right answer. 2637 * 2638 * However, this does mean that this function should NOT be used if 2639 * the RSS context flags might not be their defaults - it is ONLY 2640 * reliably correct for a newly-allocated RSS context. 2641 */ 2642 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2643 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2644 size_t outlen; 2645 int rc; 2646 2647 /* Check we have a hole for the context ID */ 2648 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); 2649 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context); 2650 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, 2651 RSS_CONTEXT_FLAGS_DEFAULT); 2652 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, 2653 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 2654 if (rc == 0) { 2655 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) 2656 rc = -EIO; 2657 else 2658 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); 2659 } 2660 return rc; 2661 } 2662 2663 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context. 2664 * If we fail, we just leave the RSS context at its default hash settings, 2665 * which is safe but may slightly reduce performance. 2666 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we 2667 * just need to set the UDP ports flags (for both IP versions). 2668 */ 2669 static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context) 2670 { 2671 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); 2672 u32 flags; 2673 2674 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); 2675 2676 if (efx_ef10_get_rss_flags(efx, context, &flags) != 0) 2677 return; 2678 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context); 2679 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; 2680 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; 2681 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); 2682 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), 2683 NULL, 0, NULL)) 2684 /* Succeeded, so UDP 4-tuple is now enabled */ 2685 efx->rx_hash_udp_4tuple = true; 2686 } 2687 2688 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, 2689 bool exclusive, unsigned *context_size) 2690 { 2691 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); 2692 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); 2693 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2694 size_t outlen; 2695 int rc; 2696 u32 alloc_type = exclusive ? 2697 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : 2698 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; 2699 unsigned rss_spread = exclusive ? 
2700 efx->rss_spread : 2701 min(rounddown_pow_of_two(efx->rss_spread), 2702 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); 2703 2704 if (!exclusive && rss_spread == 1) { 2705 *context = EFX_EF10_RSS_CONTEXT_INVALID; 2706 if (context_size) 2707 *context_size = 1; 2708 return 0; 2709 } 2710 2711 if (nic_data->datapath_caps & 2712 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) 2713 return -EOPNOTSUPP; 2714 2715 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, 2716 nic_data->vport_id); 2717 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); 2718 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); 2719 2720 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), 2721 outbuf, sizeof(outbuf), &outlen); 2722 if (rc != 0) 2723 return rc; 2724 2725 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) 2726 return -EIO; 2727 2728 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); 2729 2730 if (context_size) 2731 *context_size = rss_spread; 2732 2733 if (nic_data->datapath_caps & 2734 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) 2735 efx_ef10_set_rss_flags(efx, *context); 2736 2737 return 0; 2738 } 2739 2740 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) 2741 { 2742 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); 2743 int rc; 2744 2745 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, 2746 context); 2747 2748 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), 2749 NULL, 0, NULL); 2750 WARN_ON(rc != 0); 2751 } 2752 2753 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, 2754 const u32 *rx_indir_table, const u8 *key) 2755 { 2756 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); 2757 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); 2758 int i, rc; 2759 2760 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, 2761 context); 2762 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 2763 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); 2764 2765 /* This iterates over the length of efx->rx_indir_table, but copies 2766 * bytes from rx_indir_table. That's because the latter is a pointer 2767 * rather than an array, but should have the same length. 2768 * The efx->rx_hash_key loop below is similar. 
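 * (Each indirection-table entry is an RX queue index small enough to
 * fit in a byte, hence the u8 cast in the copy below.)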
2769 */ 2770 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) 2771 MCDI_PTR(tablebuf, 2772 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = 2773 (u8) rx_indir_table[i]; 2774 2775 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, 2776 sizeof(tablebuf), NULL, 0, NULL); 2777 if (rc != 0) 2778 return rc; 2779 2780 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, 2781 context); 2782 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != 2783 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2784 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) 2785 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; 2786 2787 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, 2788 sizeof(keybuf), NULL, 0, NULL); 2789 } 2790 2791 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) 2792 { 2793 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2794 2795 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 2796 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); 2797 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 2798 } 2799 2800 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, 2801 unsigned *context_size) 2802 { 2803 u32 new_rx_rss_context; 2804 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2805 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 2806 false, context_size); 2807 2808 if (rc != 0) 2809 return rc; 2810 2811 nic_data->rx_rss_context = new_rx_rss_context; 2812 nic_data->rx_rss_context_exclusive = false; 2813 efx_set_default_rx_indir_table(efx); 2814 return 0; 2815 } 2816 2817 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, 2818 const u32 *rx_indir_table, 2819 const u8 *key) 2820 { 2821 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2822 int rc; 2823 u32 new_rx_rss_context; 2824 2825 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || 2826 !nic_data->rx_rss_context_exclusive) { 2827 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 2828 true, NULL); 2829 if (rc == -EOPNOTSUPP) 2830 return rc; 2831 else if (rc != 0) 2832 goto fail1; 2833 } else { 2834 new_rx_rss_context = nic_data->rx_rss_context; 2835 } 2836 2837 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, 2838 rx_indir_table, key); 2839 if (rc != 0) 2840 goto fail2; 2841 2842 if (nic_data->rx_rss_context != new_rx_rss_context) 2843 efx_ef10_rx_free_indir_table(efx); 2844 nic_data->rx_rss_context = new_rx_rss_context; 2845 nic_data->rx_rss_context_exclusive = true; 2846 if (rx_indir_table != efx->rx_indir_table) 2847 memcpy(efx->rx_indir_table, rx_indir_table, 2848 sizeof(efx->rx_indir_table)); 2849 if (key != efx->rx_hash_key) 2850 memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); 2851 2852 return 0; 2853 2854 fail2: 2855 if (new_rx_rss_context != nic_data->rx_rss_context) 2856 efx_ef10_free_rss_context(efx, new_rx_rss_context); 2857 fail1: 2858 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 2859 return rc; 2860 } 2861 2862 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) 2863 { 2864 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2865 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); 2866 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); 2867 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 2868 size_t outlen; 2869 int rc, i; 2870 2871 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != 2872 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); 2873 2874 if (nic_data->rx_rss_context == 
EFX_EF10_RSS_CONTEXT_INVALID) 2875 return -ENOENT; 2876 2877 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, 2878 nic_data->rx_rss_context); 2879 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 2880 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); 2881 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), 2882 tablebuf, sizeof(tablebuf), &outlen); 2883 if (rc != 0) 2884 return rc; 2885 2886 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) 2887 return -EIO; 2888 2889 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 2890 efx->rx_indir_table[i] = MCDI_PTR(tablebuf, 2891 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; 2892 2893 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, 2894 nic_data->rx_rss_context); 2895 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != 2896 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2897 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), 2898 keybuf, sizeof(keybuf), &outlen); 2899 if (rc != 0) 2900 return rc; 2901 2902 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) 2903 return -EIO; 2904 2905 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) 2906 efx->rx_hash_key[i] = MCDI_PTR( 2907 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; 2908 2909 return 0; 2910 } 2911 2912 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 2913 const u32 *rx_indir_table, 2914 const u8 *key) 2915 { 2916 int rc; 2917 2918 if (efx->rss_spread == 1) 2919 return 0; 2920 2921 if (!key) 2922 key = efx->rx_hash_key; 2923 2924 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); 2925 2926 if (rc == -ENOBUFS && !user) { 2927 unsigned context_size; 2928 bool mismatch = false; 2929 size_t i; 2930 2931 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; 2932 i++) 2933 mismatch = rx_indir_table[i] != 2934 ethtool_rxfh_indir_default(i, efx->rss_spread); 2935 2936 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); 2937 if (rc == 0) { 2938 if (context_size != efx->rss_spread) 2939 netif_warn(efx, probe, efx->net_dev, 2940 "Could not allocate an exclusive RSS" 2941 " context; allocated a shared one of" 2942 " different size." 
2943 " Wanted %u, got %u.\n", 2944 efx->rss_spread, context_size); 2945 else if (mismatch) 2946 netif_warn(efx, probe, efx->net_dev, 2947 "Could not allocate an exclusive RSS" 2948 " context; allocated a shared one but" 2949 " could not apply custom" 2950 " indirection.\n"); 2951 else 2952 netif_info(efx, probe, efx->net_dev, 2953 "Could not allocate an exclusive RSS" 2954 " context; allocated a shared one.\n"); 2955 } 2956 } 2957 return rc; 2958 } 2959 2960 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 2961 const u32 *rx_indir_table 2962 __attribute__ ((unused)), 2963 const u8 *key 2964 __attribute__ ((unused))) 2965 { 2966 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2967 2968 if (user) 2969 return -EOPNOTSUPP; 2970 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 2971 return 0; 2972 return efx_ef10_rx_push_shared_rss_config(efx, NULL); 2973 } 2974 2975 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) 2976 { 2977 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, 2978 (rx_queue->ptr_mask + 1) * 2979 sizeof(efx_qword_t), 2980 GFP_KERNEL); 2981 } 2982 2983 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) 2984 { 2985 MCDI_DECLARE_BUF(inbuf, 2986 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 2987 EFX_BUF_SIZE)); 2988 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 2989 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; 2990 struct efx_nic *efx = rx_queue->efx; 2991 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2992 size_t inlen; 2993 dma_addr_t dma_addr; 2994 int rc; 2995 int i; 2996 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); 2997 2998 rx_queue->scatter_n = 0; 2999 rx_queue->scatter_len = 0; 3000 3001 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); 3002 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); 3003 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); 3004 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, 3005 efx_rx_queue_index(rx_queue)); 3006 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, 3007 INIT_RXQ_IN_FLAG_PREFIX, 1, 3008 INIT_RXQ_IN_FLAG_TIMESTAMP, 1); 3009 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); 3010 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); 3011 3012 dma_addr = rx_queue->rxd.buf.dma_addr; 3013 3014 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", 3015 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); 3016 3017 for (i = 0; i < entries; ++i) { 3018 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); 3019 dma_addr += EFX_BUF_SIZE; 3020 } 3021 3022 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); 3023 3024 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, 3025 NULL, 0, NULL); 3026 if (rc) 3027 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", 3028 efx_rx_queue_index(rx_queue)); 3029 } 3030 3031 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) 3032 { 3033 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); 3034 MCDI_DECLARE_BUF_ERR(outbuf); 3035 struct efx_nic *efx = rx_queue->efx; 3036 size_t outlen; 3037 int rc; 3038 3039 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, 3040 efx_rx_queue_index(rx_queue)); 3041 3042 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), 3043 outbuf, sizeof(outbuf), &outlen); 3044 3045 if (rc && rc != -EALREADY) 3046 goto fail; 3047 3048 return; 3049 3050 fail: 3051 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, 3052 outbuf, outlen, rc); 3053 } 3054 3055 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) 3056 { 3057 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); 3058 } 3059 3060 /* This creates an entry in the RX descriptor queue */ 3061 static inline void 3062 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 3063 { 3064 struct efx_rx_buffer *rx_buf; 3065 efx_qword_t *rxd; 3066 3067 rxd = efx_rx_desc(rx_queue, index); 3068 rx_buf = efx_rx_buffer(rx_queue, index); 3069 EFX_POPULATE_QWORD_2(*rxd, 3070 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 3071 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 3072 } 3073 3074 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 3075 { 3076 struct efx_nic *efx = rx_queue->efx; 3077 unsigned int write_count; 3078 efx_dword_t reg; 3079 3080 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 3081 write_count = rx_queue->added_count & ~7; 3082 if (rx_queue->notified_count == write_count) 3083 return; 3084 3085 do 3086 efx_ef10_build_rx_desc( 3087 rx_queue, 3088 rx_queue->notified_count & rx_queue->ptr_mask); 3089 while (++rx_queue->notified_count != write_count); 3090 3091 wmb(); 3092 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 3093 write_count & rx_queue->ptr_mask); 3094 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 3095 efx_rx_queue_index(rx_queue)); 3096 } 3097 3098 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 3099 3100 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 3101 { 3102 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 3103 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3104 efx_qword_t event; 3105 3106 EFX_POPULATE_QWORD_2(event, 3107 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3108 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 3109 3110 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3111 3112 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3113 * already swapped the data to little-endian order. 
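 * A plain memcpy() therefore preserves the wire format of the event.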
3114 */ 3115 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3116 sizeof(efx_qword_t)); 3117 3118 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 3119 inbuf, sizeof(inbuf), 0, 3120 efx_ef10_rx_defer_refill_complete, 0); 3121 } 3122 3123 static void 3124 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 3125 int rc, efx_dword_t *outbuf, 3126 size_t outlen_actual) 3127 { 3128 /* nothing to do */ 3129 } 3130 3131 static int efx_ef10_ev_probe(struct efx_channel *channel) 3132 { 3133 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 3134 (channel->eventq_mask + 1) * 3135 sizeof(efx_qword_t), 3136 GFP_KERNEL); 3137 } 3138 3139 static void efx_ef10_ev_fini(struct efx_channel *channel) 3140 { 3141 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); 3142 MCDI_DECLARE_BUF_ERR(outbuf); 3143 struct efx_nic *efx = channel->efx; 3144 size_t outlen; 3145 int rc; 3146 3147 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); 3148 3149 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), 3150 outbuf, sizeof(outbuf), &outlen); 3151 3152 if (rc && rc != -EALREADY) 3153 goto fail; 3154 3155 return; 3156 3157 fail: 3158 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, 3159 outbuf, outlen, rc); 3160 } 3161 3162 static int efx_ef10_ev_init(struct efx_channel *channel) 3163 { 3164 MCDI_DECLARE_BUF(inbuf, 3165 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / 3166 EFX_BUF_SIZE)); 3167 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); 3168 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; 3169 struct efx_nic *efx = channel->efx; 3170 struct efx_ef10_nic_data *nic_data; 3171 size_t inlen, outlen; 3172 unsigned int enabled, implemented; 3173 dma_addr_t dma_addr; 3174 int rc; 3175 int i; 3176 3177 nic_data = efx->nic_data; 3178 3179 /* Fill event queue with all ones (i.e. empty events) */ 3180 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 3181 3182 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); 3183 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); 3184 /* INIT_EVQ expects index in vector table, not absolute */ 3185 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); 3186 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, 3187 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 3188 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); 3189 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); 3190 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, 3191 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 3192 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); 3193 3194 if (nic_data->datapath_caps2 & 3195 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { 3196 /* Use the new generic approach to specifying event queue 3197 * configuration, requesting lower latency or higher throughput. 3198 * The options that actually get used appear in the output. 
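 * (TYPE_AUTO below lets the firmware choose; the flags it actually
 * applied are read back from INIT_EVQ_V2_OUT_FLAGS and logged.)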
3199 */ 3200 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, 3201 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, 3202 INIT_EVQ_V2_IN_FLAG_TYPE, 3203 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); 3204 } else { 3205 bool cut_thru = !(nic_data->datapath_caps & 3206 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 3207 3208 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, 3209 INIT_EVQ_IN_FLAG_INTERRUPTING, 1, 3210 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 3211 INIT_EVQ_IN_FLAG_TX_MERGE, 1, 3212 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); 3213 } 3214 3215 dma_addr = channel->eventq.buf.dma_addr; 3216 for (i = 0; i < entries; ++i) { 3217 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); 3218 dma_addr += EFX_BUF_SIZE; 3219 } 3220 3221 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); 3222 3223 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, 3224 outbuf, sizeof(outbuf), &outlen); 3225 3226 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) 3227 netif_dbg(efx, drv, efx->net_dev, 3228 "Channel %d using event queue flags %08x\n", 3229 channel->channel, 3230 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); 3231 3232 /* IRQ return is ignored */ 3233 if (channel->channel || rc) 3234 return rc; 3235 3236 /* Successfully created event queue on channel 0 */ 3237 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 3238 if (rc == -ENOSYS) { 3239 /* GET_WORKAROUNDS was implemented before this workaround, 3240 * thus it must be unavailable in this firmware. 3241 */ 3242 nic_data->workaround_26807 = false; 3243 rc = 0; 3244 } else if (rc) { 3245 goto fail; 3246 } else { 3247 nic_data->workaround_26807 = 3248 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); 3249 3250 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && 3251 !nic_data->workaround_26807) { 3252 unsigned int flags; 3253 3254 rc = efx_mcdi_set_workaround(efx, 3255 MC_CMD_WORKAROUND_BUG26807, 3256 true, &flags); 3257 3258 if (!rc) { 3259 if (flags & 3260 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { 3261 netif_info(efx, drv, efx->net_dev, 3262 "other functions on NIC have been reset\n"); 3263 3264 /* With MCFW v4.6.x and earlier, the 3265 * boot count will have incremented, 3266 * so re-read the warm_boot_count 3267 * value now to ensure this function 3268 * doesn't think it has changed next 3269 * time it checks. 
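 * (efx_ef10_mcdi_poll_reboot() treats any change in the stored
 * warm_boot_count as an MC reboot, so it must be kept in sync here.)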
3270 */ 3271 rc = efx_ef10_get_warm_boot_count(efx); 3272 if (rc >= 0) { 3273 nic_data->warm_boot_count = rc; 3274 rc = 0; 3275 } 3276 } 3277 nic_data->workaround_26807 = true; 3278 } else if (rc == -EPERM) { 3279 rc = 0; 3280 } 3281 } 3282 } 3283 3284 if (!rc) 3285 return 0; 3286 3287 fail: 3288 efx_ef10_ev_fini(channel); 3289 return rc; 3290 } 3291 3292 static void efx_ef10_ev_remove(struct efx_channel *channel) 3293 { 3294 efx_nic_free_buffer(channel->efx, &channel->eventq.buf); 3295 } 3296 3297 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 3298 unsigned int rx_queue_label) 3299 { 3300 struct efx_nic *efx = rx_queue->efx; 3301 3302 netif_info(efx, hw, efx->net_dev, 3303 "rx event arrived on queue %d labeled as queue %u\n", 3304 efx_rx_queue_index(rx_queue), rx_queue_label); 3305 3306 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3307 } 3308 3309 static void 3310 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 3311 unsigned int actual, unsigned int expected) 3312 { 3313 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 3314 struct efx_nic *efx = rx_queue->efx; 3315 3316 netif_info(efx, hw, efx->net_dev, 3317 "dropped %d events (index=%d expected=%d)\n", 3318 dropped, actual, expected); 3319 3320 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3321 } 3322 3323 /* partially received RX was aborted. clean up. */ 3324 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 3325 { 3326 unsigned int rx_desc_ptr; 3327 3328 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 3329 "scattered RX aborted (dropping %u buffers)\n", 3330 rx_queue->scatter_n); 3331 3332 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 3333 3334 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 3335 0, EFX_RX_PKT_DISCARD); 3336 3337 rx_queue->removed_count += rx_queue->scatter_n; 3338 rx_queue->scatter_n = 0; 3339 rx_queue->scatter_len = 0; 3340 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 3341 } 3342 3343 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 3344 unsigned int n_packets, 3345 unsigned int rx_encap_hdr, 3346 unsigned int rx_l3_class, 3347 unsigned int rx_l4_class, 3348 const efx_qword_t *event) 3349 { 3350 struct efx_nic *efx = channel->efx; 3351 bool handled = false; 3352 3353 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 3354 if (!(efx->net_dev->features & NETIF_F_RXALL)) { 3355 if (!efx->loopback_selftest) 3356 channel->n_rx_eth_crc_err += n_packets; 3357 return EFX_RX_PKT_DISCARD; 3358 } 3359 handled = true; 3360 } 3361 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 3362 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3363 rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3364 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3365 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3366 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3367 netdev_WARN(efx->net_dev, 3368 "invalid class for RX_IPCKSUM_ERR: event=" 3369 EFX_QWORD_FMT "\n", 3370 EFX_QWORD_VAL(*event)); 3371 if (!efx->loopback_selftest) 3372 *(rx_encap_hdr ? 
3373 &channel->n_rx_outer_ip_hdr_chksum_err : 3374 &channel->n_rx_ip_hdr_chksum_err) += n_packets; 3375 return 0; 3376 } 3377 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 3378 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3379 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3380 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3381 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3382 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) 3383 netdev_WARN(efx->net_dev, 3384 "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 3385 EFX_QWORD_FMT "\n", 3386 EFX_QWORD_VAL(*event)); 3387 if (!efx->loopback_selftest) 3388 *(rx_encap_hdr ? 3389 &channel->n_rx_outer_tcp_udp_chksum_err : 3390 &channel->n_rx_tcp_udp_chksum_err) += n_packets; 3391 return 0; 3392 } 3393 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 3394 if (unlikely(!rx_encap_hdr)) 3395 netdev_WARN(efx->net_dev, 3396 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 3397 EFX_QWORD_FMT "\n", 3398 EFX_QWORD_VAL(*event)); 3399 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3400 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3401 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3402 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3403 netdev_WARN(efx->net_dev, 3404 "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 3405 EFX_QWORD_FMT "\n", 3406 EFX_QWORD_VAL(*event)); 3407 if (!efx->loopback_selftest) 3408 channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 3409 return 0; 3410 } 3411 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 3412 if (unlikely(!rx_encap_hdr)) 3413 netdev_WARN(efx->net_dev, 3414 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3415 EFX_QWORD_FMT "\n", 3416 EFX_QWORD_VAL(*event)); 3417 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3418 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3419 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3420 rx_l4_class != ESE_FZ_L4_CLASS_UDP))) 3421 netdev_WARN(efx->net_dev, 3422 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3423 EFX_QWORD_FMT "\n", 3424 EFX_QWORD_VAL(*event)); 3425 if (!efx->loopback_selftest) 3426 channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 3427 return 0; 3428 } 3429 3430 WARN_ON(!handled); /* No error bits were recognised */ 3431 return 0; 3432 } 3433 3434 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 3435 const efx_qword_t *event) 3436 { 3437 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 3438 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; 3439 unsigned int n_descs, n_packets, i; 3440 struct efx_nic *efx = channel->efx; 3441 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3442 struct efx_rx_queue *rx_queue; 3443 efx_qword_t errors; 3444 bool rx_cont; 3445 u16 flags = 0; 3446 3447 if (unlikely(READ_ONCE(efx->reset_pending))) 3448 return 0; 3449 3450 /* Basic packet information */ 3451 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 3452 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 3453 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 3454 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 3455 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); 3456 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 3457 rx_encap_hdr = 3458 nic_data->datapath_caps & 3459 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
3460 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 3461 ESE_EZ_ENCAP_HDR_NONE; 3462 3463 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 3464 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 3465 EFX_QWORD_FMT "\n", 3466 EFX_QWORD_VAL(*event)); 3467 3468 rx_queue = efx_channel_get_rx_queue(channel); 3469 3470 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 3471 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 3472 3473 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 3474 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3475 3476 if (n_descs != rx_queue->scatter_n + 1) { 3477 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3478 3479 /* detect rx abort */ 3480 if (unlikely(n_descs == rx_queue->scatter_n)) { 3481 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 3482 netdev_WARN(efx->net_dev, 3483 "invalid RX abort: scatter_n=%u event=" 3484 EFX_QWORD_FMT "\n", 3485 rx_queue->scatter_n, 3486 EFX_QWORD_VAL(*event)); 3487 efx_ef10_handle_rx_abort(rx_queue); 3488 return 0; 3489 } 3490 3491 /* Check that RX completion merging is valid, i.e. 3492 * the current firmware supports it and this is a 3493 * non-scattered packet. 3494 */ 3495 if (!(nic_data->datapath_caps & 3496 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 3497 rx_queue->scatter_n != 0 || rx_cont) { 3498 efx_ef10_handle_rx_bad_lbits( 3499 rx_queue, next_ptr_lbits, 3500 (rx_queue->removed_count + 3501 rx_queue->scatter_n + 1) & 3502 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3503 return 0; 3504 } 3505 3506 /* Merged completion for multiple non-scattered packets */ 3507 rx_queue->scatter_n = 1; 3508 rx_queue->scatter_len = 0; 3509 n_packets = n_descs; 3510 ++channel->n_rx_merge_events; 3511 channel->n_rx_merge_packets += n_packets; 3512 flags |= EFX_RX_PKT_PREFIX_LEN; 3513 } else { 3514 ++rx_queue->scatter_n; 3515 rx_queue->scatter_len += rx_bytes; 3516 if (rx_cont) 3517 return 0; 3518 n_packets = 1; 3519 } 3520 3521 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 3522 ESF_DZ_RX_IPCKSUM_ERR, 1, 3523 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 3524 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 3525 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 3526 EFX_AND_QWORD(errors, *event, errors); 3527 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 3528 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, 3529 rx_encap_hdr, 3530 rx_l3_class, rx_l4_class, 3531 event); 3532 } else { 3533 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || 3534 rx_l4_class == ESE_FZ_L4_CLASS_UDP; 3535 3536 switch (rx_encap_hdr) { 3537 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ 3538 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ 3539 if (tcpudp) 3540 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ 3541 break; 3542 case ESE_EZ_ENCAP_HDR_GRE: 3543 case ESE_EZ_ENCAP_HDR_NONE: 3544 if (tcpudp) 3545 flags |= EFX_RX_PKT_CSUMMED; 3546 break; 3547 default: 3548 netdev_WARN(efx->net_dev, 3549 "unknown encapsulation type: event=" 3550 EFX_QWORD_FMT "\n", 3551 EFX_QWORD_VAL(*event)); 3552 } 3553 } 3554 3555 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) 3556 flags |= EFX_RX_PKT_TCP; 3557 3558 channel->irq_mod_score += 2 * n_packets; 3559 3560 /* Handle received packet(s) */ 3561 for (i = 0; i < n_packets; i++) { 3562 efx_rx_packet(rx_queue, 3563 rx_queue->removed_count & rx_queue->ptr_mask, 3564 rx_queue->scatter_n, rx_queue->scatter_len, 3565 flags); 3566 rx_queue->removed_count += rx_queue->scatter_n; 3567 } 3568 3569 rx_queue->scatter_n = 0; 3570 rx_queue->scatter_len = 0; 3571 3572 return n_packets; 3573 } 3574 3575 static int 3576 
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 3577 { 3578 struct efx_nic *efx = channel->efx; 3579 struct efx_tx_queue *tx_queue; 3580 unsigned int tx_ev_desc_ptr; 3581 unsigned int tx_ev_q_label; 3582 int tx_descs = 0; 3583 3584 if (unlikely(READ_ONCE(efx->reset_pending))) 3585 return 0; 3586 3587 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 3588 return 0; 3589 3590 /* Transmit completion */ 3591 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 3592 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 3593 tx_queue = efx_channel_get_tx_queue(channel, 3594 tx_ev_q_label % EFX_TXQ_TYPES); 3595 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) & 3596 tx_queue->ptr_mask); 3597 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 3598 3599 return tx_descs; 3600 } 3601 3602 static void 3603 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 3604 { 3605 struct efx_nic *efx = channel->efx; 3606 int subcode; 3607 3608 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 3609 3610 switch (subcode) { 3611 case ESE_DZ_DRV_TIMER_EV: 3612 case ESE_DZ_DRV_WAKE_UP_EV: 3613 break; 3614 case ESE_DZ_DRV_START_UP_EV: 3615 /* event queue init complete. ok. */ 3616 break; 3617 default: 3618 netif_err(efx, hw, efx->net_dev, 3619 "channel %d unknown driver event type %d" 3620 " (data " EFX_QWORD_FMT ")\n", 3621 channel->channel, subcode, 3622 EFX_QWORD_VAL(*event)); 3623 3624 } 3625 } 3626 3627 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 3628 efx_qword_t *event) 3629 { 3630 struct efx_nic *efx = channel->efx; 3631 u32 subcode; 3632 3633 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 3634 3635 switch (subcode) { 3636 case EFX_EF10_TEST: 3637 channel->event_test_cpu = raw_smp_processor_id(); 3638 break; 3639 case EFX_EF10_REFILL: 3640 /* The queue must be empty, so we won't receive any rx 3641 * events, so efx_process_channel() won't refill the 3642 * queue. Refill it here 3643 */ 3644 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 3645 break; 3646 default: 3647 netif_err(efx, hw, efx->net_dev, 3648 "channel %d unknown driver event type %u" 3649 " (data " EFX_QWORD_FMT ")\n", 3650 channel->channel, (unsigned) subcode, 3651 EFX_QWORD_VAL(*event)); 3652 } 3653 } 3654 3655 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 3656 { 3657 struct efx_nic *efx = channel->efx; 3658 efx_qword_t event, *p_event; 3659 unsigned int read_ptr; 3660 int ev_code; 3661 int tx_descs = 0; 3662 int spent = 0; 3663 3664 if (quota <= 0) 3665 return spent; 3666 3667 read_ptr = channel->eventq_read_ptr; 3668 3669 for (;;) { 3670 p_event = efx_event(channel, read_ptr); 3671 event = *p_event; 3672 3673 if (!efx_event_present(&event)) 3674 break; 3675 3676 EFX_SET_QWORD(*p_event); 3677 3678 ++read_ptr; 3679 3680 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 3681 3682 netif_vdbg(efx, drv, efx->net_dev, 3683 "processing event on %d " EFX_QWORD_FMT "\n", 3684 channel->channel, EFX_QWORD_VAL(event)); 3685 3686 switch (ev_code) { 3687 case ESE_DZ_EV_CODE_MCDI_EV: 3688 efx_mcdi_process_event(channel, &event); 3689 break; 3690 case ESE_DZ_EV_CODE_RX_EV: 3691 spent += efx_ef10_handle_rx_event(channel, &event); 3692 if (spent >= quota) { 3693 /* XXX can we split a merged event to 3694 * avoid going over-quota? 
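 * At present we don't: a merged event is counted in full by
 * efx_ef10_handle_rx_event(), so spent can overshoot quota; we just
 * clamp it below and leave any remaining events for the next poll.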
3695 */ 3696 spent = quota; 3697 goto out; 3698 } 3699 break; 3700 case ESE_DZ_EV_CODE_TX_EV: 3701 tx_descs += efx_ef10_handle_tx_event(channel, &event); 3702 if (tx_descs > efx->txq_entries) { 3703 spent = quota; 3704 goto out; 3705 } else if (++spent == quota) { 3706 goto out; 3707 } 3708 break; 3709 case ESE_DZ_EV_CODE_DRIVER_EV: 3710 efx_ef10_handle_driver_event(channel, &event); 3711 if (++spent == quota) 3712 goto out; 3713 break; 3714 case EFX_EF10_DRVGEN_EV: 3715 efx_ef10_handle_driver_generated_event(channel, &event); 3716 break; 3717 default: 3718 netif_err(efx, hw, efx->net_dev, 3719 "channel %d unknown event type %d" 3720 " (data " EFX_QWORD_FMT ")\n", 3721 channel->channel, ev_code, 3722 EFX_QWORD_VAL(event)); 3723 } 3724 } 3725 3726 out: 3727 channel->eventq_read_ptr = read_ptr; 3728 return spent; 3729 } 3730 3731 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 3732 { 3733 struct efx_nic *efx = channel->efx; 3734 efx_dword_t rptr; 3735 3736 if (EFX_EF10_WORKAROUND_35388(efx)) { 3737 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 3738 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 3739 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 3740 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 3741 3742 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3743 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 3744 ERF_DD_EVQ_IND_RPTR, 3745 (channel->eventq_read_ptr & 3746 channel->eventq_mask) >> 3747 ERF_DD_EVQ_IND_RPTR_WIDTH); 3748 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3749 channel->channel); 3750 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3751 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 3752 ERF_DD_EVQ_IND_RPTR, 3753 channel->eventq_read_ptr & 3754 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 3755 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3756 channel->channel); 3757 } else { 3758 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 3759 channel->eventq_read_ptr & 3760 channel->eventq_mask); 3761 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 3762 } 3763 } 3764 3765 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 3766 { 3767 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3768 struct efx_nic *efx = channel->efx; 3769 efx_qword_t event; 3770 int rc; 3771 3772 EFX_POPULATE_QWORD_2(event, 3773 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3774 ESF_DZ_EV_DATA, EFX_EF10_TEST); 3775 3776 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3777 3778 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3779 * already swapped the data to little-endian order. 3780 */ 3781 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3782 sizeof(efx_qword_t)); 3783 3784 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 3785 NULL, 0, NULL); 3786 if (rc != 0) 3787 goto fail; 3788 3789 return; 3790 3791 fail: 3792 WARN_ON(true); 3793 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 3794 } 3795 3796 void efx_ef10_handle_drain_event(struct efx_nic *efx) 3797 { 3798 if (atomic_dec_and_test(&efx->active_queues)) 3799 wake_up(&efx->flush_wq); 3800 3801 WARN_ON(atomic_read(&efx->active_queues) < 0); 3802 } 3803 3804 static int efx_ef10_fini_dmaq(struct efx_nic *efx) 3805 { 3806 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3807 struct efx_channel *channel; 3808 struct efx_tx_queue *tx_queue; 3809 struct efx_rx_queue *rx_queue; 3810 int pending; 3811 3812 /* If the MC has just rebooted, the TX/RX queues will have already been 3813 * torn down, but efx->active_queues needs to be set to zero. 
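 * Otherwise the flush wait below would presumably just time out and
 * return -ETIMEDOUT, since the rebooted MC will not send drain events
 * for queues it no longer knows about.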
3814 */ 3815 if (nic_data->must_realloc_vis) { 3816 atomic_set(&efx->active_queues, 0); 3817 return 0; 3818 } 3819 3820 /* Do not attempt to write to the NIC during EEH recovery */ 3821 if (efx->state != STATE_RECOVERY) { 3822 efx_for_each_channel(channel, efx) { 3823 efx_for_each_channel_rx_queue(rx_queue, channel) 3824 efx_ef10_rx_fini(rx_queue); 3825 efx_for_each_channel_tx_queue(tx_queue, channel) 3826 efx_ef10_tx_fini(tx_queue); 3827 } 3828 3829 wait_event_timeout(efx->flush_wq, 3830 atomic_read(&efx->active_queues) == 0, 3831 msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); 3832 pending = atomic_read(&efx->active_queues); 3833 if (pending) { 3834 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", 3835 pending); 3836 return -ETIMEDOUT; 3837 } 3838 } 3839 3840 return 0; 3841 } 3842 3843 static void efx_ef10_prepare_flr(struct efx_nic *efx) 3844 { 3845 atomic_set(&efx->active_queues, 0); 3846 } 3847 3848 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, 3849 const struct efx_filter_spec *right) 3850 { 3851 if ((left->match_flags ^ right->match_flags) | 3852 ((left->flags ^ right->flags) & 3853 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) 3854 return false; 3855 3856 return memcmp(&left->outer_vid, &right->outer_vid, 3857 sizeof(struct efx_filter_spec) - 3858 offsetof(struct efx_filter_spec, outer_vid)) == 0; 3859 } 3860 3861 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) 3862 { 3863 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); 3864 return jhash2((const u32 *)&spec->outer_vid, 3865 (sizeof(struct efx_filter_spec) - 3866 offsetof(struct efx_filter_spec, outer_vid)) / 4, 3867 0); 3868 /* XXX should we randomise the initval? */ 3869 } 3870 3871 /* Decide whether a filter should be exclusive or else should allow 3872 * delivery to additional recipients. Currently we decide that 3873 * filters for specific local unicast MAC and IP addresses are 3874 * exclusive. 
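 * For example, a filter on a specific non-multicast local MAC address,
 * or on ETHER_TYPE=IPv4/IPv6 plus a non-multicast local IP address, is
 * pushed with the exclusive INSERT op; anything that could match
 * multicast traffic uses SUBSCRIBE/UNSUBSCRIBE instead so that other
 * recipients can share it.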
3875 */ 3876 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) 3877 { 3878 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 3879 !is_multicast_ether_addr(spec->loc_mac)) 3880 return true; 3881 3882 if ((spec->match_flags & 3883 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 3884 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 3885 if (spec->ether_type == htons(ETH_P_IP) && 3886 !ipv4_is_multicast(spec->loc_host[0])) 3887 return true; 3888 if (spec->ether_type == htons(ETH_P_IPV6) && 3889 ((const u8 *)spec->loc_host)[0] != 0xff) 3890 return true; 3891 } 3892 3893 return false; 3894 } 3895 3896 static struct efx_filter_spec * 3897 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, 3898 unsigned int filter_idx) 3899 { 3900 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & 3901 ~EFX_EF10_FILTER_FLAGS); 3902 } 3903 3904 static unsigned int 3905 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, 3906 unsigned int filter_idx) 3907 { 3908 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; 3909 } 3910 3911 static void 3912 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, 3913 unsigned int filter_idx, 3914 const struct efx_filter_spec *spec, 3915 unsigned int flags) 3916 { 3917 table->entry[filter_idx].spec = (unsigned long)spec | flags; 3918 } 3919 3920 static void 3921 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, 3922 const struct efx_filter_spec *spec, 3923 efx_dword_t *inbuf) 3924 { 3925 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 3926 u32 match_fields = 0, uc_match, mc_match; 3927 3928 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 3929 efx_ef10_filter_is_exclusive(spec) ? 3930 MC_CMD_FILTER_OP_IN_OP_INSERT : 3931 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 3932 3933 /* Convert match flags and values. Unlike almost 3934 * everything else in MCDI, these fields are in 3935 * network byte order. 3936 */ 3937 #define COPY_VALUE(value, mcdi_field) \ 3938 do { \ 3939 match_fields |= \ 3940 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 3941 mcdi_field ## _LBN; \ 3942 BUILD_BUG_ON( \ 3943 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ 3944 sizeof(value)); \ 3945 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ 3946 &value, sizeof(value)); \ 3947 } while (0) 3948 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ 3949 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ 3950 COPY_VALUE(spec->gen_field, mcdi_field); \ 3951 } 3952 /* Handle encap filters first. They will always be mismatch 3953 * (unknown UC or MC) filters 3954 */ 3955 if (encap_type) { 3956 /* ether_type and outer_ip_proto need to be variables 3957 * because COPY_VALUE wants to memcpy them 3958 */ 3959 __be16 ether_type = 3960 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 3961 ETH_P_IPV6 : ETH_P_IP); 3962 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; 3963 u8 outer_ip_proto; 3964 3965 switch (encap_type & EFX_ENCAP_TYPES_MASK) { 3966 case EFX_ENCAP_TYPE_VXLAN: 3967 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; 3968 /* fallthrough */ 3969 case EFX_ENCAP_TYPE_GENEVE: 3970 COPY_VALUE(ether_type, ETHER_TYPE); 3971 outer_ip_proto = IPPROTO_UDP; 3972 COPY_VALUE(outer_ip_proto, IP_PROTO); 3973 /* We always need to set the type field, even 3974 * though we're not matching on the TNI. 
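 * VXLAN and GENEVE share the same outer match here (UDP over IPv4 or
 * IPv6), so VNI_TYPE is presumably what tells the firmware which
 * tunnel header to expect; hence the fall-through from the VXLAN case
 * above.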
3975 */ 3976 MCDI_POPULATE_DWORD_1(inbuf, 3977 FILTER_OP_EXT_IN_VNI_OR_VSID, 3978 FILTER_OP_EXT_IN_VNI_TYPE, 3979 vni_type); 3980 break; 3981 case EFX_ENCAP_TYPE_NVGRE: 3982 COPY_VALUE(ether_type, ETHER_TYPE); 3983 outer_ip_proto = IPPROTO_GRE; 3984 COPY_VALUE(outer_ip_proto, IP_PROTO); 3985 break; 3986 default: 3987 WARN_ON(1); 3988 } 3989 3990 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 3991 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 3992 } else { 3993 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 3994 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 3995 } 3996 3997 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 3998 match_fields |= 3999 is_multicast_ether_addr(spec->loc_mac) ? 4000 1 << mc_match : 4001 1 << uc_match; 4002 COPY_FIELD(REM_HOST, rem_host, SRC_IP); 4003 COPY_FIELD(LOC_HOST, loc_host, DST_IP); 4004 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); 4005 COPY_FIELD(REM_PORT, rem_port, SRC_PORT); 4006 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); 4007 COPY_FIELD(LOC_PORT, loc_port, DST_PORT); 4008 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); 4009 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); 4010 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); 4011 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); 4012 #undef COPY_FIELD 4013 #undef COPY_VALUE 4014 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, 4015 match_fields); 4016 } 4017 4018 static void efx_ef10_filter_push_prep(struct efx_nic *efx, 4019 const struct efx_filter_spec *spec, 4020 efx_dword_t *inbuf, u64 handle, 4021 bool replacing) 4022 { 4023 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4024 u32 flags = spec->flags; 4025 4026 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); 4027 4028 /* Remove RSS flag if we don't have an RSS context. */ 4029 if (flags & EFX_FILTER_FLAG_RX_RSS && 4030 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && 4031 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) 4032 flags &= ~EFX_FILTER_FLAG_RX_RSS; 4033 4034 if (replacing) { 4035 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4036 MC_CMD_FILTER_OP_IN_OP_REPLACE); 4037 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 4038 } else { 4039 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); 4040 } 4041 4042 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); 4043 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, 4044 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4045 MC_CMD_FILTER_OP_IN_RX_DEST_DROP : 4046 MC_CMD_FILTER_OP_IN_RX_DEST_HOST); 4047 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); 4048 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, 4049 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); 4050 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, 4051 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4052 0 : spec->dmaq_id); 4053 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 4054 (flags & EFX_FILTER_FLAG_RX_RSS) ? 4055 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 4056 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 4057 if (flags & EFX_FILTER_FLAG_RX_RSS) 4058 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, 4059 spec->rss_context != 4060 EFX_FILTER_RSS_CONTEXT_DEFAULT ? 
4061 spec->rss_context : nic_data->rx_rss_context); 4062 } 4063 4064 static int efx_ef10_filter_push(struct efx_nic *efx, 4065 const struct efx_filter_spec *spec, 4066 u64 *handle, bool replacing) 4067 { 4068 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4069 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); 4070 int rc; 4071 4072 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); 4073 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 4074 outbuf, sizeof(outbuf), NULL); 4075 if (rc == 0) 4076 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 4077 if (rc == -ENOSPC) 4078 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 4079 return rc; 4080 } 4081 4082 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) 4083 { 4084 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 4085 unsigned int match_flags = spec->match_flags; 4086 unsigned int uc_match, mc_match; 4087 u32 mcdi_flags = 0; 4088 4089 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ 4090 unsigned int old_match_flags = match_flags; \ 4091 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ 4092 if (match_flags != old_match_flags) \ 4093 mcdi_flags |= \ 4094 (1 << ((encap) ? \ 4095 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ 4096 mcdi_field ## _LBN : \ 4097 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ 4098 mcdi_field ## _LBN)); \ 4099 } 4100 /* inner or outer based on encap type */ 4101 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); 4102 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); 4103 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); 4104 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); 4105 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); 4106 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); 4107 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); 4108 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); 4109 /* always outer */ 4110 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); 4111 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); 4112 #undef MAP_FILTER_TO_MCDI_FLAG 4113 4114 /* special handling for encap type, and mismatch */ 4115 if (encap_type) { 4116 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; 4117 mcdi_flags |= 4118 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4119 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4120 4121 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 4122 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 4123 } else { 4124 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 4125 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 4126 } 4127 4128 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { 4129 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; 4130 mcdi_flags |= 4131 is_multicast_ether_addr(spec->loc_mac) ? 4132 1 << mc_match : 4133 1 << uc_match; 4134 } 4135 4136 /* Did we map them all? 
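 * Any bit still set in match_flags is a match type the macros above
 * did not translate, so the returned mcdi_flags would under-describe
 * the filter; the WARN_ON_ONCE() below catches that case.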
*/ 4137 WARN_ON_ONCE(match_flags); 4138 4139 return mcdi_flags; 4140 } 4141 4142 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, 4143 const struct efx_filter_spec *spec) 4144 { 4145 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 4146 unsigned int match_pri; 4147 4148 for (match_pri = 0; 4149 match_pri < table->rx_match_count; 4150 match_pri++) 4151 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) 4152 return match_pri; 4153 4154 return -EPROTONOSUPPORT; 4155 } 4156 4157 static s32 efx_ef10_filter_insert(struct efx_nic *efx, 4158 struct efx_filter_spec *spec, 4159 bool replace_equal) 4160 { 4161 struct efx_ef10_filter_table *table = efx->filter_state; 4162 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4163 struct efx_filter_spec *saved_spec; 4164 unsigned int match_pri, hash; 4165 unsigned int priv_flags; 4166 bool replacing = false; 4167 int ins_index = -1; 4168 DEFINE_WAIT(wait); 4169 bool is_mc_recip; 4170 s32 rc; 4171 4172 /* For now, only support RX filters */ 4173 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 4174 EFX_FILTER_FLAG_RX) 4175 return -EINVAL; 4176 4177 rc = efx_ef10_filter_pri(table, spec); 4178 if (rc < 0) 4179 return rc; 4180 match_pri = rc; 4181 4182 hash = efx_ef10_filter_hash(spec); 4183 is_mc_recip = efx_filter_is_mc_recipient(spec); 4184 if (is_mc_recip) 4185 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4186 4187 /* Find any existing filters with the same match tuple or 4188 * else a free slot to insert at. If any of them are busy, 4189 * we have to wait and retry. 4190 */ 4191 for (;;) { 4192 unsigned int depth = 1; 4193 unsigned int i; 4194 4195 spin_lock_bh(&efx->filter_lock); 4196 4197 for (;;) { 4198 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4199 saved_spec = efx_ef10_filter_entry_spec(table, i); 4200 4201 if (!saved_spec) { 4202 if (ins_index < 0) 4203 ins_index = i; 4204 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4205 if (table->entry[i].spec & 4206 EFX_EF10_FILTER_FLAG_BUSY) 4207 break; 4208 if (spec->priority < saved_spec->priority && 4209 spec->priority != EFX_FILTER_PRI_AUTO) { 4210 rc = -EPERM; 4211 goto out_unlock; 4212 } 4213 if (!is_mc_recip) { 4214 /* This is the only one */ 4215 if (spec->priority == 4216 saved_spec->priority && 4217 !replace_equal) { 4218 rc = -EEXIST; 4219 goto out_unlock; 4220 } 4221 ins_index = i; 4222 goto found; 4223 } else if (spec->priority > 4224 saved_spec->priority || 4225 (spec->priority == 4226 saved_spec->priority && 4227 replace_equal)) { 4228 if (ins_index < 0) 4229 ins_index = i; 4230 else 4231 __set_bit(depth, mc_rem_map); 4232 } 4233 } 4234 4235 /* Once we reach the maximum search depth, use 4236 * the first suitable slot or return -EBUSY if 4237 * there was none 4238 */ 4239 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 4240 if (ins_index < 0) { 4241 rc = -EBUSY; 4242 goto out_unlock; 4243 } 4244 goto found; 4245 } 4246 4247 ++depth; 4248 } 4249 4250 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 4251 spin_unlock_bh(&efx->filter_lock); 4252 schedule(); 4253 } 4254 4255 found: 4256 /* Create a software table entry if necessary, and mark it 4257 * busy. We might yet fail to insert, but any attempt to 4258 * insert a conflicting filter while we're waiting for the 4259 * firmware must find the busy entry. 
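 * Other inserters that hit a BUSY entry back off onto table->waitq in
 * the search loop above and retry after we clear the flag and call
 * wake_up_all() below.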
4260 */ 4261 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 4262 if (saved_spec) { 4263 if (spec->priority == EFX_FILTER_PRI_AUTO && 4264 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 4265 /* Just make sure it won't be removed */ 4266 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 4267 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 4268 table->entry[ins_index].spec &= 4269 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4270 rc = ins_index; 4271 goto out_unlock; 4272 } 4273 replacing = true; 4274 priv_flags = efx_ef10_filter_entry_flags(table, ins_index); 4275 } else { 4276 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 4277 if (!saved_spec) { 4278 rc = -ENOMEM; 4279 goto out_unlock; 4280 } 4281 *saved_spec = *spec; 4282 priv_flags = 0; 4283 } 4284 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 4285 priv_flags | EFX_EF10_FILTER_FLAG_BUSY); 4286 4287 /* Mark lower-priority multicast recipients busy prior to removal */ 4288 if (is_mc_recip) { 4289 unsigned int depth, i; 4290 4291 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4292 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4293 if (test_bit(depth, mc_rem_map)) 4294 table->entry[i].spec |= 4295 EFX_EF10_FILTER_FLAG_BUSY; 4296 } 4297 } 4298 4299 spin_unlock_bh(&efx->filter_lock); 4300 4301 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, 4302 replacing); 4303 4304 /* Finalise the software table entry */ 4305 spin_lock_bh(&efx->filter_lock); 4306 if (rc == 0) { 4307 if (replacing) { 4308 /* Update the fields that may differ */ 4309 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 4310 saved_spec->flags |= 4311 EFX_FILTER_FLAG_RX_OVER_AUTO; 4312 saved_spec->priority = spec->priority; 4313 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 4314 saved_spec->flags |= spec->flags; 4315 saved_spec->rss_context = spec->rss_context; 4316 saved_spec->dmaq_id = spec->dmaq_id; 4317 } 4318 } else if (!replacing) { 4319 kfree(saved_spec); 4320 saved_spec = NULL; 4321 } 4322 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); 4323 4324 /* Remove and finalise entries for lower-priority multicast 4325 * recipients 4326 */ 4327 if (is_mc_recip) { 4328 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4329 unsigned int depth, i; 4330 4331 memset(inbuf, 0, sizeof(inbuf)); 4332 4333 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4334 if (!test_bit(depth, mc_rem_map)) 4335 continue; 4336 4337 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4338 saved_spec = efx_ef10_filter_entry_spec(table, i); 4339 priv_flags = efx_ef10_filter_entry_flags(table, i); 4340 4341 if (rc == 0) { 4342 spin_unlock_bh(&efx->filter_lock); 4343 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4344 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4345 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4346 table->entry[i].handle); 4347 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 4348 inbuf, sizeof(inbuf), 4349 NULL, 0, NULL); 4350 spin_lock_bh(&efx->filter_lock); 4351 } 4352 4353 if (rc == 0) { 4354 kfree(saved_spec); 4355 saved_spec = NULL; 4356 priv_flags = 0; 4357 } else { 4358 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; 4359 } 4360 efx_ef10_filter_set_entry(table, i, saved_spec, 4361 priv_flags); 4362 } 4363 } 4364 4365 /* If successful, return the inserted filter ID */ 4366 if (rc == 0) 4367 rc = efx_ef10_make_filter_id(match_pri, ins_index); 4368 4369 wake_up_all(&table->waitq); 4370 out_unlock: 4371 spin_unlock_bh(&efx->filter_lock); 4372 finish_wait(&table->waitq, &wait); 4373 return rc; 4374 } 4375 4376 static void 
efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 4377 { 4378 /* no need to do anything here on EF10 */ 4379 } 4380 4381 /* Remove a filter. 4382 * If !by_index, remove by ID 4383 * If by_index, remove by index 4384 * Filter ID may come from userland and must be range-checked. 4385 */ 4386 static int efx_ef10_filter_remove_internal(struct efx_nic *efx, 4387 unsigned int priority_mask, 4388 u32 filter_id, bool by_index) 4389 { 4390 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4391 struct efx_ef10_filter_table *table = efx->filter_state; 4392 MCDI_DECLARE_BUF(inbuf, 4393 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 4394 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 4395 struct efx_filter_spec *spec; 4396 DEFINE_WAIT(wait); 4397 int rc; 4398 4399 /* Find the software table entry and mark it busy. Don't 4400 * remove it yet; any attempt to update while we're waiting 4401 * for the firmware must find the busy entry. 4402 */ 4403 for (;;) { 4404 spin_lock_bh(&efx->filter_lock); 4405 if (!(table->entry[filter_idx].spec & 4406 EFX_EF10_FILTER_FLAG_BUSY)) 4407 break; 4408 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 4409 spin_unlock_bh(&efx->filter_lock); 4410 schedule(); 4411 } 4412 4413 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4414 if (!spec || 4415 (!by_index && 4416 efx_ef10_filter_pri(table, spec) != 4417 efx_ef10_filter_get_unsafe_pri(filter_id))) { 4418 rc = -ENOENT; 4419 goto out_unlock; 4420 } 4421 4422 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 4423 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 4424 /* Just remove flags */ 4425 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 4426 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4427 rc = 0; 4428 goto out_unlock; 4429 } 4430 4431 if (!(priority_mask & (1U << spec->priority))) { 4432 rc = -ENOENT; 4433 goto out_unlock; 4434 } 4435 4436 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 4437 spin_unlock_bh(&efx->filter_lock); 4438 4439 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 4440 /* Reset to an automatic filter */ 4441 4442 struct efx_filter_spec new_spec = *spec; 4443 4444 new_spec.priority = EFX_FILTER_PRI_AUTO; 4445 new_spec.flags = (EFX_FILTER_FLAG_RX | 4446 (efx_rss_enabled(efx) ? 4447 EFX_FILTER_FLAG_RX_RSS : 0)); 4448 new_spec.dmaq_id = 0; 4449 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 4450 rc = efx_ef10_filter_push(efx, &new_spec, 4451 &table->entry[filter_idx].handle, 4452 true); 4453 4454 spin_lock_bh(&efx->filter_lock); 4455 if (rc == 0) 4456 *spec = new_spec; 4457 } else { 4458 /* Really remove the filter */ 4459 4460 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4461 efx_ef10_filter_is_exclusive(spec) ? 
4462 MC_CMD_FILTER_OP_IN_OP_REMOVE : 4463 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4464 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4465 table->entry[filter_idx].handle); 4466 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, 4467 inbuf, sizeof(inbuf), NULL, 0, NULL); 4468 4469 spin_lock_bh(&efx->filter_lock); 4470 if ((rc == 0) || (rc == -ENOENT)) { 4471 /* Filter removed OK or didn't actually exist */ 4472 kfree(spec); 4473 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4474 } else { 4475 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4476 MC_CMD_FILTER_OP_EXT_IN_LEN, 4477 NULL, 0, rc); 4478 } 4479 } 4480 4481 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 4482 wake_up_all(&table->waitq); 4483 out_unlock: 4484 spin_unlock_bh(&efx->filter_lock); 4485 finish_wait(&table->waitq, &wait); 4486 return rc; 4487 } 4488 4489 static int efx_ef10_filter_remove_safe(struct efx_nic *efx, 4490 enum efx_filter_priority priority, 4491 u32 filter_id) 4492 { 4493 return efx_ef10_filter_remove_internal(efx, 1U << priority, 4494 filter_id, false); 4495 } 4496 4497 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, 4498 enum efx_filter_priority priority, 4499 u32 filter_id) 4500 { 4501 if (filter_id == EFX_EF10_FILTER_ID_INVALID) 4502 return; 4503 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); 4504 } 4505 4506 static int efx_ef10_filter_get_safe(struct efx_nic *efx, 4507 enum efx_filter_priority priority, 4508 u32 filter_id, struct efx_filter_spec *spec) 4509 { 4510 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4511 struct efx_ef10_filter_table *table = efx->filter_state; 4512 const struct efx_filter_spec *saved_spec; 4513 int rc; 4514 4515 spin_lock_bh(&efx->filter_lock); 4516 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); 4517 if (saved_spec && saved_spec->priority == priority && 4518 efx_ef10_filter_pri(table, saved_spec) == 4519 efx_ef10_filter_get_unsafe_pri(filter_id)) { 4520 *spec = *saved_spec; 4521 rc = 0; 4522 } else { 4523 rc = -ENOENT; 4524 } 4525 spin_unlock_bh(&efx->filter_lock); 4526 return rc; 4527 } 4528 4529 static int efx_ef10_filter_clear_rx(struct efx_nic *efx, 4530 enum efx_filter_priority priority) 4531 { 4532 unsigned int priority_mask; 4533 unsigned int i; 4534 int rc; 4535 4536 priority_mask = (((1U << (priority + 1)) - 1) & 4537 ~(1U << EFX_FILTER_PRI_AUTO)); 4538 4539 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 4540 rc = efx_ef10_filter_remove_internal(efx, priority_mask, 4541 i, true); 4542 if (rc && rc != -ENOENT) 4543 return rc; 4544 } 4545 4546 return 0; 4547 } 4548 4549 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, 4550 enum efx_filter_priority priority) 4551 { 4552 struct efx_ef10_filter_table *table = efx->filter_state; 4553 unsigned int filter_idx; 4554 s32 count = 0; 4555 4556 spin_lock_bh(&efx->filter_lock); 4557 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4558 if (table->entry[filter_idx].spec && 4559 efx_ef10_filter_entry_spec(table, filter_idx)->priority == 4560 priority) 4561 ++count; 4562 } 4563 spin_unlock_bh(&efx->filter_lock); 4564 return count; 4565 } 4566 4567 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) 4568 { 4569 struct efx_ef10_filter_table *table = efx->filter_state; 4570 4571 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; 4572 } 4573 4574 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, 4575 enum efx_filter_priority priority, 4576 u32 *buf, u32 size) 4577 { 4578 struct efx_ef10_filter_table 
*table = efx->filter_state; 4579 struct efx_filter_spec *spec; 4580 unsigned int filter_idx; 4581 s32 count = 0; 4582 4583 spin_lock_bh(&efx->filter_lock); 4584 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4585 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4586 if (spec && spec->priority == priority) { 4587 if (count == size) { 4588 count = -EMSGSIZE; 4589 break; 4590 } 4591 buf[count++] = 4592 efx_ef10_make_filter_id( 4593 efx_ef10_filter_pri(table, spec), 4594 filter_idx); 4595 } 4596 } 4597 spin_unlock_bh(&efx->filter_lock); 4598 return count; 4599 } 4600 4601 #ifdef CONFIG_RFS_ACCEL 4602 4603 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; 4604 4605 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, 4606 struct efx_filter_spec *spec) 4607 { 4608 struct efx_ef10_filter_table *table = efx->filter_state; 4609 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4610 struct efx_filter_spec *saved_spec; 4611 unsigned int hash, i, depth = 1; 4612 bool replacing = false; 4613 int ins_index = -1; 4614 u64 cookie; 4615 s32 rc; 4616 4617 /* Must be an RX filter without RSS and not for a multicast 4618 * destination address (RFS only works for connected sockets). 4619 * These restrictions allow us to pass only a tiny amount of 4620 * data through to the completion function. 4621 */ 4622 EFX_WARN_ON_PARANOID(spec->flags != 4623 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); 4624 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); 4625 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); 4626 4627 hash = efx_ef10_filter_hash(spec); 4628 4629 spin_lock_bh(&efx->filter_lock); 4630 4631 /* Find any existing filter with the same match tuple or else 4632 * a free slot to insert at. If an existing filter is busy, 4633 * we have to give up. 4634 */ 4635 for (;;) { 4636 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4637 saved_spec = efx_ef10_filter_entry_spec(table, i); 4638 4639 if (!saved_spec) { 4640 if (ins_index < 0) 4641 ins_index = i; 4642 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4643 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { 4644 rc = -EBUSY; 4645 goto fail_unlock; 4646 } 4647 if (spec->priority < saved_spec->priority) { 4648 rc = -EPERM; 4649 goto fail_unlock; 4650 } 4651 ins_index = i; 4652 break; 4653 } 4654 4655 /* Once we reach the maximum search depth, use the 4656 * first suitable slot or return -EBUSY if there was 4657 * none 4658 */ 4659 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 4660 if (ins_index < 0) { 4661 rc = -EBUSY; 4662 goto fail_unlock; 4663 } 4664 break; 4665 } 4666 4667 ++depth; 4668 } 4669 4670 /* Create a software table entry if necessary, and mark it 4671 * busy. We might yet fail to insert, but any attempt to 4672 * insert a conflicting filter while we're waiting for the 4673 * firmware must find the busy entry. 
4674 */ 4675 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 4676 if (saved_spec) { 4677 replacing = true; 4678 } else { 4679 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 4680 if (!saved_spec) { 4681 rc = -ENOMEM; 4682 goto fail_unlock; 4683 } 4684 *saved_spec = *spec; 4685 } 4686 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 4687 EFX_EF10_FILTER_FLAG_BUSY); 4688 4689 spin_unlock_bh(&efx->filter_lock); 4690 4691 /* Pack up the variables needed on completion */ 4692 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; 4693 4694 efx_ef10_filter_push_prep(efx, spec, inbuf, 4695 table->entry[ins_index].handle, replacing); 4696 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 4697 MC_CMD_FILTER_OP_OUT_LEN, 4698 efx_ef10_filter_rfs_insert_complete, cookie); 4699 4700 return ins_index; 4701 4702 fail_unlock: 4703 spin_unlock_bh(&efx->filter_lock); 4704 return rc; 4705 } 4706 4707 static void 4708 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, 4709 int rc, efx_dword_t *outbuf, 4710 size_t outlen_actual) 4711 { 4712 struct efx_ef10_filter_table *table = efx->filter_state; 4713 unsigned int ins_index, dmaq_id; 4714 struct efx_filter_spec *spec; 4715 bool replacing; 4716 4717 /* Unpack the cookie */ 4718 replacing = cookie >> 31; 4719 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); 4720 dmaq_id = cookie & 0xffff; 4721 4722 spin_lock_bh(&efx->filter_lock); 4723 spec = efx_ef10_filter_entry_spec(table, ins_index); 4724 if (rc == 0) { 4725 table->entry[ins_index].handle = 4726 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 4727 if (replacing) 4728 spec->dmaq_id = dmaq_id; 4729 } else if (!replacing) { 4730 kfree(spec); 4731 spec = NULL; 4732 } 4733 efx_ef10_filter_set_entry(table, ins_index, spec, 0); 4734 spin_unlock_bh(&efx->filter_lock); 4735 4736 wake_up_all(&table->waitq); 4737 } 4738 4739 static void 4740 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 4741 unsigned long filter_idx, 4742 int rc, efx_dword_t *outbuf, 4743 size_t outlen_actual); 4744 4745 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4746 unsigned int filter_idx) 4747 { 4748 struct efx_ef10_filter_table *table = efx->filter_state; 4749 struct efx_filter_spec *spec = 4750 efx_ef10_filter_entry_spec(table, filter_idx); 4751 MCDI_DECLARE_BUF(inbuf, 4752 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 4753 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 4754 4755 if (!spec || 4756 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || 4757 spec->priority != EFX_FILTER_PRI_HINT || 4758 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 4759 flow_id, filter_idx)) 4760 return false; 4761 4762 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4763 MC_CMD_FILTER_OP_IN_OP_REMOVE); 4764 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4765 table->entry[filter_idx].handle); 4766 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, 4767 efx_ef10_filter_rfs_expire_complete, filter_idx)) 4768 return false; 4769 4770 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 4771 return true; 4772 } 4773 4774 static void 4775 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 4776 unsigned long filter_idx, 4777 int rc, efx_dword_t *outbuf, 4778 size_t outlen_actual) 4779 { 4780 struct efx_ef10_filter_table *table = efx->filter_state; 4781 struct efx_filter_spec *spec = 4782 efx_ef10_filter_entry_spec(table, filter_idx); 4783 4784 spin_lock_bh(&efx->filter_lock); 4785 if (rc == 0) { 4786 kfree(spec); 4787 efx_ef10_filter_set_entry(table, 
filter_idx, NULL, 0); 4788 } 4789 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 4790 wake_up_all(&table->waitq); 4791 spin_unlock_bh(&efx->filter_lock); 4792 } 4793 4794 #endif /* CONFIG_RFS_ACCEL */ 4795 4796 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) 4797 { 4798 int match_flags = 0; 4799 4800 #define MAP_FLAG(gen_flag, mcdi_field) do { \ 4801 u32 old_mcdi_flags = mcdi_flags; \ 4802 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ 4803 mcdi_field ## _LBN); \ 4804 if (mcdi_flags != old_mcdi_flags) \ 4805 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ 4806 } while (0) 4807 4808 if (encap) { 4809 /* encap filters must specify encap type */ 4810 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 4811 /* and imply ethertype and ip proto */ 4812 mcdi_flags &= 4813 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4814 mcdi_flags &= 4815 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4816 /* VLAN tags refer to the outer packet */ 4817 MAP_FLAG(INNER_VID, INNER_VLAN); 4818 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4819 /* everything else refers to the inner packet */ 4820 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); 4821 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); 4822 MAP_FLAG(REM_HOST, IFRM_SRC_IP); 4823 MAP_FLAG(LOC_HOST, IFRM_DST_IP); 4824 MAP_FLAG(REM_MAC, IFRM_SRC_MAC); 4825 MAP_FLAG(REM_PORT, IFRM_SRC_PORT); 4826 MAP_FLAG(LOC_MAC, IFRM_DST_MAC); 4827 MAP_FLAG(LOC_PORT, IFRM_DST_PORT); 4828 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); 4829 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); 4830 } else { 4831 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); 4832 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); 4833 MAP_FLAG(REM_HOST, SRC_IP); 4834 MAP_FLAG(LOC_HOST, DST_IP); 4835 MAP_FLAG(REM_MAC, SRC_MAC); 4836 MAP_FLAG(REM_PORT, SRC_PORT); 4837 MAP_FLAG(LOC_MAC, DST_MAC); 4838 MAP_FLAG(LOC_PORT, DST_PORT); 4839 MAP_FLAG(ETHER_TYPE, ETHER_TYPE); 4840 MAP_FLAG(INNER_VID, INNER_VLAN); 4841 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4842 MAP_FLAG(IP_PROTO, IP_PROTO); 4843 } 4844 #undef MAP_FLAG 4845 4846 /* Did we map them all? 
*/ 4847 if (mcdi_flags) 4848 return -EINVAL; 4849 4850 return match_flags; 4851 } 4852 4853 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) 4854 { 4855 struct efx_ef10_filter_table *table = efx->filter_state; 4856 struct efx_ef10_filter_vlan *vlan, *next_vlan; 4857 4858 /* See comment in efx_ef10_filter_table_remove() */ 4859 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 4860 return; 4861 4862 if (!table) 4863 return; 4864 4865 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) 4866 efx_ef10_filter_del_vlan_internal(efx, vlan); 4867 } 4868 4869 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, 4870 bool encap, 4871 enum efx_filter_match_flags match_flags) 4872 { 4873 unsigned int match_pri; 4874 int mf; 4875 4876 for (match_pri = 0; 4877 match_pri < table->rx_match_count; 4878 match_pri++) { 4879 mf = efx_ef10_filter_match_flags_from_mcdi(encap, 4880 table->rx_match_mcdi_flags[match_pri]); 4881 if (mf == match_flags) 4882 return true; 4883 } 4884 4885 return false; 4886 } 4887 4888 static int 4889 efx_ef10_filter_table_probe_matches(struct efx_nic *efx, 4890 struct efx_ef10_filter_table *table, 4891 bool encap) 4892 { 4893 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); 4894 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); 4895 unsigned int pd_match_pri, pd_match_count; 4896 size_t outlen; 4897 int rc; 4898 4899 /* Find out which RX filter types are supported, and their priorities */ 4900 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, 4901 encap ? 4902 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : 4903 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); 4904 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, 4905 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), 4906 &outlen); 4907 if (rc) 4908 return rc; 4909 4910 pd_match_count = MCDI_VAR_ARRAY_LEN( 4911 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); 4912 4913 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { 4914 u32 mcdi_flags = 4915 MCDI_ARRAY_DWORD( 4916 outbuf, 4917 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, 4918 pd_match_pri); 4919 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); 4920 if (rc < 0) { 4921 netif_dbg(efx, probe, efx->net_dev, 4922 "%s: fw flags %#x pri %u not supported in driver\n", 4923 __func__, mcdi_flags, pd_match_pri); 4924 } else { 4925 netif_dbg(efx, probe, efx->net_dev, 4926 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 4927 __func__, mcdi_flags, pd_match_pri, 4928 rc, table->rx_match_count); 4929 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; 4930 table->rx_match_count++; 4931 } 4932 } 4933 4934 return 0; 4935 } 4936 4937 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 4938 { 4939 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4940 struct net_device *net_dev = efx->net_dev; 4941 struct efx_ef10_filter_table *table; 4942 struct efx_ef10_vlan *vlan; 4943 int rc; 4944 4945 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 4946 return -EINVAL; 4947 4948 if (efx->filter_state) /* already probed */ 4949 return 0; 4950 4951 table = kzalloc(sizeof(*table), GFP_KERNEL); 4952 if (!table) 4953 return -ENOMEM; 4954 4955 table->rx_match_count = 0; 4956 rc = efx_ef10_filter_table_probe_matches(efx, table, false); 4957 if (rc) 4958 goto fail; 4959 if (nic_data->datapath_caps & 4960 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 4961 rc = efx_ef10_filter_table_probe_matches(efx, table, true); 
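/* If both probes succeeded, rx_match_mcdi_flags[] now lists every
 * match combination the firmware supports (non-encapsulated entries
 * first, then encapsulated ones), in the firmware's priority order;
 * efx_ef10_filter_pri() maps a spec back to an index in this list at
 * insertion time.
 */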
4962 if (rc) 4963 goto fail; 4964 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && 4965 !(efx_ef10_filter_match_supported(table, false, 4966 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && 4967 efx_ef10_filter_match_supported(table, false, 4968 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { 4969 netif_info(efx, probe, net_dev, 4970 "VLAN filters are not supported in this firmware variant\n"); 4971 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4972 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4973 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 4974 } 4975 4976 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); 4977 if (!table->entry) { 4978 rc = -ENOMEM; 4979 goto fail; 4980 } 4981 4982 table->mc_promisc_last = false; 4983 table->vlan_filter = 4984 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 4985 INIT_LIST_HEAD(&table->vlan_list); 4986 4987 efx->filter_state = table; 4988 init_waitqueue_head(&table->waitq); 4989 4990 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 4991 rc = efx_ef10_filter_add_vlan(efx, vlan->vid); 4992 if (rc) 4993 goto fail_add_vlan; 4994 } 4995 4996 return 0; 4997 4998 fail_add_vlan: 4999 efx_ef10_filter_cleanup_vlans(efx); 5000 efx->filter_state = NULL; 5001 fail: 5002 kfree(table); 5003 return rc; 5004 } 5005 5006 /* Caller must hold efx->filter_sem for read if race against 5007 * efx_ef10_filter_table_remove() is possible 5008 */ 5009 static void efx_ef10_filter_table_restore(struct efx_nic *efx) 5010 { 5011 struct efx_ef10_filter_table *table = efx->filter_state; 5012 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5013 unsigned int invalid_filters = 0, failed = 0; 5014 struct efx_ef10_filter_vlan *vlan; 5015 struct efx_filter_spec *spec; 5016 unsigned int filter_idx; 5017 u32 mcdi_flags; 5018 int match_pri; 5019 int rc, i; 5020 5021 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5022 5023 if (!nic_data->must_restore_filters) 5024 return; 5025 5026 if (!table) 5027 return; 5028 5029 spin_lock_bh(&efx->filter_lock); 5030 5031 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5032 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5033 if (!spec) 5034 continue; 5035 5036 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 5037 match_pri = 0; 5038 while (match_pri < table->rx_match_count && 5039 table->rx_match_mcdi_flags[match_pri] != mcdi_flags) 5040 ++match_pri; 5041 if (match_pri >= table->rx_match_count) { 5042 invalid_filters++; 5043 goto not_restored; 5044 } 5045 if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && 5046 spec->rss_context != nic_data->rx_rss_context) 5047 netif_warn(efx, drv, efx->net_dev, 5048 "Warning: unable to restore a filter with specific RSS context.\n"); 5049 5050 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 5051 spin_unlock_bh(&efx->filter_lock); 5052 5053 rc = efx_ef10_filter_push(efx, spec, 5054 &table->entry[filter_idx].handle, 5055 false); 5056 if (rc) 5057 failed++; 5058 spin_lock_bh(&efx->filter_lock); 5059 5060 if (rc) { 5061 not_restored: 5062 list_for_each_entry(vlan, &table->vlan_list, list) 5063 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) 5064 if (vlan->default_filters[i] == filter_idx) 5065 vlan->default_filters[i] = 5066 EFX_EF10_FILTER_ID_INVALID; 5067 5068 kfree(spec); 5069 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 5070 } else { 5071 table->entry[filter_idx].spec &= 5072 ~EFX_EF10_FILTER_FLAG_BUSY; 5073 } 5074 } 5075 5076 
spin_unlock_bh(&efx->filter_lock); 5077 5078 /* This can happen validly if the MC's capabilities have changed, so 5079 * is not an error. 5080 */ 5081 if (invalid_filters) 5082 netif_dbg(efx, drv, efx->net_dev, 5083 "Did not restore %u filters that are now unsupported.\n", 5084 invalid_filters); 5085 5086 if (failed) 5087 netif_err(efx, hw, efx->net_dev, 5088 "unable to restore %u filters\n", failed); 5089 else 5090 nic_data->must_restore_filters = false; 5091 } 5092 5093 static void efx_ef10_filter_table_remove(struct efx_nic *efx) 5094 { 5095 struct efx_ef10_filter_table *table = efx->filter_state; 5096 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 5097 struct efx_filter_spec *spec; 5098 unsigned int filter_idx; 5099 int rc; 5100 5101 efx_ef10_filter_cleanup_vlans(efx); 5102 efx->filter_state = NULL; 5103 /* If we were called without locking, then it's not safe to free 5104 * the table as others might be using it. So we just WARN, leak 5105 * the memory, and potentially get an inconsistent filter table 5106 * state. 5107 * This should never actually happen. 5108 */ 5109 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5110 return; 5111 5112 if (!table) 5113 return; 5114 5115 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5116 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5117 if (!spec) 5118 continue; 5119 5120 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 5121 efx_ef10_filter_is_exclusive(spec) ? 5122 MC_CMD_FILTER_OP_IN_OP_REMOVE : 5123 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 5124 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 5125 table->entry[filter_idx].handle); 5126 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, 5127 sizeof(inbuf), NULL, 0, NULL); 5128 if (rc) 5129 netif_info(efx, drv, efx->net_dev, 5130 "%s: filter %04x remove failed\n", 5131 __func__, filter_idx); 5132 kfree(spec); 5133 } 5134 5135 vfree(table->entry); 5136 kfree(table); 5137 } 5138 5139 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) 5140 { 5141 struct efx_ef10_filter_table *table = efx->filter_state; 5142 unsigned int filter_idx; 5143 5144 if (*id != EFX_EF10_FILTER_ID_INVALID) { 5145 filter_idx = efx_ef10_filter_get_unsafe_id(*id); 5146 if (!table->entry[filter_idx].spec) 5147 netif_dbg(efx, drv, efx->net_dev, 5148 "marked null spec old %04x:%04x\n", *id, 5149 filter_idx); 5150 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; 5151 *id = EFX_EF10_FILTER_ID_INVALID; 5152 } 5153 } 5154 5155 /* Mark old per-VLAN filters that may need to be removed */ 5156 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, 5157 struct efx_ef10_filter_vlan *vlan) 5158 { 5159 struct efx_ef10_filter_table *table = efx->filter_state; 5160 unsigned int i; 5161 5162 for (i = 0; i < table->dev_uc_count; i++) 5163 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); 5164 for (i = 0; i < table->dev_mc_count; i++) 5165 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); 5166 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5167 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); 5168 } 5169 5170 /* Mark old filters that may need to be removed. 
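 * This is the mark phase of a mark-and-sweep: filters that are renewed
 * afterwards get AUTO_OLD cleared again by efx_ef10_filter_insert(),
 * and efx_ef10_filter_remove_old() then sweeps whatever is still
 * marked.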
5171 * Caller must hold efx->filter_sem for read if race against 5172 * efx_ef10_filter_table_remove() is possible 5173 */ 5174 static void efx_ef10_filter_mark_old(struct efx_nic *efx) 5175 { 5176 struct efx_ef10_filter_table *table = efx->filter_state; 5177 struct efx_ef10_filter_vlan *vlan; 5178 5179 spin_lock_bh(&efx->filter_lock); 5180 list_for_each_entry(vlan, &table->vlan_list, list) 5181 _efx_ef10_filter_vlan_mark_old(efx, vlan); 5182 spin_unlock_bh(&efx->filter_lock); 5183 } 5184 5185 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) 5186 { 5187 struct efx_ef10_filter_table *table = efx->filter_state; 5188 struct net_device *net_dev = efx->net_dev; 5189 struct netdev_hw_addr *uc; 5190 unsigned int i; 5191 5192 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); 5193 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 5194 i = 1; 5195 netdev_for_each_uc_addr(uc, net_dev) { 5196 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { 5197 table->uc_promisc = true; 5198 break; 5199 } 5200 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 5201 i++; 5202 } 5203 5204 table->dev_uc_count = i; 5205 } 5206 5207 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) 5208 { 5209 struct efx_ef10_filter_table *table = efx->filter_state; 5210 struct net_device *net_dev = efx->net_dev; 5211 struct netdev_hw_addr *mc; 5212 unsigned int i; 5213 5214 table->mc_overflow = false; 5215 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); 5216 5217 i = 0; 5218 netdev_for_each_mc_addr(mc, net_dev) { 5219 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { 5220 table->mc_promisc = true; 5221 table->mc_overflow = true; 5222 break; 5223 } 5224 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); 5225 i++; 5226 } 5227 5228 table->dev_mc_count = i; 5229 } 5230 5231 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, 5232 struct efx_ef10_filter_vlan *vlan, 5233 bool multicast, bool rollback) 5234 { 5235 struct efx_ef10_filter_table *table = efx->filter_state; 5236 struct efx_ef10_dev_addr *addr_list; 5237 enum efx_filter_flags filter_flags; 5238 struct efx_filter_spec spec; 5239 u8 baddr[ETH_ALEN]; 5240 unsigned int i, j; 5241 int addr_count; 5242 u16 *ids; 5243 int rc; 5244 5245 if (multicast) { 5246 addr_list = table->dev_mc_list; 5247 addr_count = table->dev_mc_count; 5248 ids = vlan->mc; 5249 } else { 5250 addr_list = table->dev_uc_list; 5251 addr_count = table->dev_uc_count; 5252 ids = vlan->uc; 5253 } 5254 5255 filter_flags = efx_rss_enabled(efx) ? 
EFX_FILTER_FLAG_RX_RSS : 0; 5256 5257 /* Insert/renew filters */ 5258 for (i = 0; i < addr_count; i++) { 5259 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5260 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5261 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5262 rc = efx_ef10_filter_insert(efx, &spec, true); 5263 if (rc < 0) { 5264 if (rollback) { 5265 netif_info(efx, drv, efx->net_dev, 5266 "efx_ef10_filter_insert failed rc=%d\n", 5267 rc); 5268 /* Fall back to promiscuous */ 5269 for (j = 0; j < i; j++) { 5270 efx_ef10_filter_remove_unsafe( 5271 efx, EFX_FILTER_PRI_AUTO, 5272 ids[j]); 5273 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5274 } 5275 return rc; 5276 } else { 5277 /* keep invalid ID, and carry on */ 5278 } 5279 } else { 5280 ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5281 } 5282 } 5283 5284 if (multicast && rollback) { 5285 /* Also need an Ethernet broadcast filter */ 5286 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != 5287 EFX_EF10_FILTER_ID_INVALID); 5288 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5289 eth_broadcast_addr(baddr); 5290 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5291 rc = efx_ef10_filter_insert(efx, &spec, true); 5292 if (rc < 0) { 5293 netif_warn(efx, drv, efx->net_dev, 5294 "Broadcast filter insert failed rc=%d\n", rc); 5295 /* Fall back to promiscuous */ 5296 for (j = 0; j < i; j++) { 5297 efx_ef10_filter_remove_unsafe( 5298 efx, EFX_FILTER_PRI_AUTO, 5299 ids[j]); 5300 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5301 } 5302 return rc; 5303 } else { 5304 vlan->default_filters[EFX_EF10_BCAST] = 5305 efx_ef10_filter_get_unsafe_id(rc); 5306 } 5307 } 5308 5309 return 0; 5310 } 5311 5312 static int efx_ef10_filter_insert_def(struct efx_nic *efx, 5313 struct efx_ef10_filter_vlan *vlan, 5314 enum efx_encap_type encap_type, 5315 bool multicast, bool rollback) 5316 { 5317 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5318 enum efx_filter_flags filter_flags; 5319 struct efx_filter_spec spec; 5320 u8 baddr[ETH_ALEN]; 5321 int rc; 5322 u16 *id; 5323 5324 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5325 5326 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5327 5328 if (multicast) 5329 efx_filter_set_mc_def(&spec); 5330 else 5331 efx_filter_set_uc_def(&spec); 5332 5333 if (encap_type) { 5334 if (nic_data->datapath_caps & 5335 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 5336 efx_filter_set_encap_type(&spec, encap_type); 5337 else 5338 /* don't insert encap filters on non-supporting 5339 * platforms. ID will be left as INVALID. 5340 */ 5341 return 0; 5342 } 5343 5344 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 5345 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 5346 5347 rc = efx_ef10_filter_insert(efx, &spec, true); 5348 if (rc < 0) { 5349 const char *um = multicast ? 
"Multicast" : "Unicast"; 5350 const char *encap_name = ""; 5351 const char *encap_ipv = ""; 5352 5353 if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5354 EFX_ENCAP_TYPE_VXLAN) 5355 encap_name = "VXLAN "; 5356 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5357 EFX_ENCAP_TYPE_NVGRE) 5358 encap_name = "NVGRE "; 5359 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5360 EFX_ENCAP_TYPE_GENEVE) 5361 encap_name = "GENEVE "; 5362 if (encap_type & EFX_ENCAP_FLAG_IPV6) 5363 encap_ipv = "IPv6 "; 5364 else if (encap_type) 5365 encap_ipv = "IPv4 "; 5366 5367 /* unprivileged functions can't insert mismatch filters 5368 * for encapsulated or unicast traffic, so downgrade 5369 * those warnings to debug. 5370 */ 5371 netif_cond_dbg(efx, drv, efx->net_dev, 5372 rc == -EPERM && (encap_type || !multicast), warn, 5373 "%s%s%s mismatch filter insert failed rc=%d\n", 5374 encap_name, encap_ipv, um, rc); 5375 } else if (multicast) { 5376 /* mapping from encap types to default filter IDs (multicast) */ 5377 static enum efx_ef10_default_filters map[] = { 5378 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, 5379 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, 5380 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, 5381 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, 5382 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5383 EFX_EF10_VXLAN6_MCDEF, 5384 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5385 EFX_EF10_NVGRE6_MCDEF, 5386 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5387 EFX_EF10_GENEVE6_MCDEF, 5388 }; 5389 5390 /* quick bounds check (BCAST result impossible) */ 5391 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5392 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5393 WARN_ON(1); 5394 return -EINVAL; 5395 } 5396 /* then follow map */ 5397 id = &vlan->default_filters[map[encap_type]]; 5398 5399 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5400 *id = efx_ef10_filter_get_unsafe_id(rc); 5401 if (!nic_data->workaround_26807 && !encap_type) { 5402 /* Also need an Ethernet broadcast filter */ 5403 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 5404 filter_flags, 0); 5405 eth_broadcast_addr(baddr); 5406 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5407 rc = efx_ef10_filter_insert(efx, &spec, true); 5408 if (rc < 0) { 5409 netif_warn(efx, drv, efx->net_dev, 5410 "Broadcast filter insert failed rc=%d\n", 5411 rc); 5412 if (rollback) { 5413 /* Roll back the mc_def filter */ 5414 efx_ef10_filter_remove_unsafe( 5415 efx, EFX_FILTER_PRI_AUTO, 5416 *id); 5417 *id = EFX_EF10_FILTER_ID_INVALID; 5418 return rc; 5419 } 5420 } else { 5421 EFX_WARN_ON_PARANOID( 5422 vlan->default_filters[EFX_EF10_BCAST] != 5423 EFX_EF10_FILTER_ID_INVALID); 5424 vlan->default_filters[EFX_EF10_BCAST] = 5425 efx_ef10_filter_get_unsafe_id(rc); 5426 } 5427 } 5428 rc = 0; 5429 } else { 5430 /* mapping from encap types to default filter IDs (unicast) */ 5431 static enum efx_ef10_default_filters map[] = { 5432 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, 5433 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, 5434 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, 5435 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, 5436 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5437 EFX_EF10_VXLAN6_UCDEF, 5438 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5439 EFX_EF10_NVGRE6_UCDEF, 5440 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5441 EFX_EF10_GENEVE6_UCDEF, 5442 }; 5443 5444 /* quick bounds check (BCAST result impossible) */ 5445 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5446 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5447 WARN_ON(1); 5448 
return -EINVAL; 5449 } 5450 /* then follow map */ 5451 id = &vlan->default_filters[map[encap_type]]; 5452 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5453 *id = rc; 5454 rc = 0; 5455 } 5456 return rc; 5457 } 5458 5459 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD 5460 * flag or removes these filters, we don't need to hold the filter_lock while 5461 * scanning for these filters. 5462 */ 5463 static void efx_ef10_filter_remove_old(struct efx_nic *efx) 5464 { 5465 struct efx_ef10_filter_table *table = efx->filter_state; 5466 int remove_failed = 0; 5467 int remove_noent = 0; 5468 int rc; 5469 int i; 5470 5471 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 5472 if (READ_ONCE(table->entry[i].spec) & 5473 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 5474 rc = efx_ef10_filter_remove_internal(efx, 5475 1U << EFX_FILTER_PRI_AUTO, i, true); 5476 if (rc == -ENOENT) 5477 remove_noent++; 5478 else if (rc) 5479 remove_failed++; 5480 } 5481 } 5482 5483 if (remove_failed) 5484 netif_info(efx, drv, efx->net_dev, 5485 "%s: failed to remove %d filters\n", 5486 __func__, remove_failed); 5487 if (remove_noent) 5488 netif_info(efx, drv, efx->net_dev, 5489 "%s: failed to remove %d non-existent filters\n", 5490 __func__, remove_noent); 5491 } 5492 5493 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 5494 { 5495 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5496 u8 mac_old[ETH_ALEN]; 5497 int rc, rc2; 5498 5499 /* Only reconfigure a PF-created vport */ 5500 if (is_zero_ether_addr(nic_data->vport_mac)) 5501 return 0; 5502 5503 efx_device_detach_sync(efx); 5504 efx_net_stop(efx->net_dev); 5505 down_write(&efx->filter_sem); 5506 efx_ef10_filter_table_remove(efx); 5507 up_write(&efx->filter_sem); 5508 5509 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); 5510 if (rc) 5511 goto restore_filters; 5512 5513 ether_addr_copy(mac_old, nic_data->vport_mac); 5514 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, 5515 nic_data->vport_mac); 5516 if (rc) 5517 goto restore_vadaptor; 5518 5519 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, 5520 efx->net_dev->dev_addr); 5521 if (!rc) { 5522 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 5523 } else { 5524 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); 5525 if (rc2) { 5526 /* Failed to add original MAC, so clear vport_mac */ 5527 eth_zero_addr(nic_data->vport_mac); 5528 goto reset_nic; 5529 } 5530 } 5531 5532 restore_vadaptor: 5533 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 5534 if (rc2) 5535 goto reset_nic; 5536 restore_filters: 5537 down_write(&efx->filter_sem); 5538 rc2 = efx_ef10_filter_table_probe(efx); 5539 up_write(&efx->filter_sem); 5540 if (rc2) 5541 goto reset_nic; 5542 5543 rc2 = efx_net_open(efx->net_dev); 5544 if (rc2) 5545 goto reset_nic; 5546 5547 efx_device_attach_if_not_resetting(efx); 5548 5549 return rc; 5550 5551 reset_nic: 5552 netif_err(efx, drv, efx->net_dev, 5553 "Failed to restore when changing MAC address - scheduling reset\n"); 5554 efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 5555 5556 return rc ? 
rc : rc2; 5557 } 5558 5559 /* Caller must hold efx->filter_sem for read if race against 5560 * efx_ef10_filter_table_remove() is possible 5561 */ 5562 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, 5563 struct efx_ef10_filter_vlan *vlan) 5564 { 5565 struct efx_ef10_filter_table *table = efx->filter_state; 5566 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5567 5568 /* Do not install unspecified VID if VLAN filtering is enabled. 5569 * Do not install all specified VIDs if VLAN filtering is disabled. 5570 */ 5571 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) 5572 return; 5573 5574 /* Insert/renew unicast filters */ 5575 if (table->uc_promisc) { 5576 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, 5577 false, false); 5578 efx_ef10_filter_insert_addr_list(efx, vlan, false, false); 5579 } else { 5580 /* If any of the filters failed to insert, fall back to 5581 * promiscuous mode - add in the uc_def filter. But keep 5582 * our individual unicast filters. 5583 */ 5584 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) 5585 efx_ef10_filter_insert_def(efx, vlan, 5586 EFX_ENCAP_TYPE_NONE, 5587 false, false); 5588 } 5589 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5590 false, false); 5591 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5592 EFX_ENCAP_FLAG_IPV6, 5593 false, false); 5594 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5595 false, false); 5596 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5597 EFX_ENCAP_FLAG_IPV6, 5598 false, false); 5599 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5600 false, false); 5601 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5602 EFX_ENCAP_FLAG_IPV6, 5603 false, false); 5604 5605 /* Insert/renew multicast filters */ 5606 /* If changing promiscuous state with cascaded multicast filters, remove 5607 * old filters first, so that packets are dropped rather than duplicated 5608 */ 5609 if (nic_data->workaround_26807 && 5610 table->mc_promisc_last != table->mc_promisc) 5611 efx_ef10_filter_remove_old(efx); 5612 if (table->mc_promisc) { 5613 if (nic_data->workaround_26807) { 5614 /* If we failed to insert promiscuous filters, rollback 5615 * and fall back to individual multicast filters 5616 */ 5617 if (efx_ef10_filter_insert_def(efx, vlan, 5618 EFX_ENCAP_TYPE_NONE, 5619 true, true)) { 5620 /* Changing promisc state, so remove old filters */ 5621 efx_ef10_filter_remove_old(efx); 5622 efx_ef10_filter_insert_addr_list(efx, vlan, 5623 true, false); 5624 } 5625 } else { 5626 /* If we failed to insert promiscuous filters, don't 5627 * rollback. Regardless, also insert the mc_list, 5628 * unless it's incomplete due to overflow 5629 */ 5630 efx_ef10_filter_insert_def(efx, vlan, 5631 EFX_ENCAP_TYPE_NONE, 5632 true, false); 5633 if (!table->mc_overflow) 5634 efx_ef10_filter_insert_addr_list(efx, vlan, 5635 true, false); 5636 } 5637 } else { 5638 /* If any filters failed to insert, rollback and fall back to 5639 * promiscuous mode - mc_def filter and maybe broadcast. If 5640 * that fails, roll back again and insert as many of our 5641 * individual multicast filters as we can. 
5642 */ 5643 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { 5644 /* Changing promisc state, so remove old filters */ 5645 if (nic_data->workaround_26807) 5646 efx_ef10_filter_remove_old(efx); 5647 if (efx_ef10_filter_insert_def(efx, vlan, 5648 EFX_ENCAP_TYPE_NONE, 5649 true, true)) 5650 efx_ef10_filter_insert_addr_list(efx, vlan, 5651 true, false); 5652 } 5653 } 5654 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5655 true, false); 5656 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5657 EFX_ENCAP_FLAG_IPV6, 5658 true, false); 5659 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5660 true, false); 5661 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5662 EFX_ENCAP_FLAG_IPV6, 5663 true, false); 5664 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5665 true, false); 5666 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5667 EFX_ENCAP_FLAG_IPV6, 5668 true, false); 5669 } 5670 5671 /* Caller must hold efx->filter_sem for read if race against 5672 * efx_ef10_filter_table_remove() is possible 5673 */ 5674 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) 5675 { 5676 struct efx_ef10_filter_table *table = efx->filter_state; 5677 struct net_device *net_dev = efx->net_dev; 5678 struct efx_ef10_filter_vlan *vlan; 5679 bool vlan_filter; 5680 5681 if (!efx_dev_registered(efx)) 5682 return; 5683 5684 if (!table) 5685 return; 5686 5687 efx_ef10_filter_mark_old(efx); 5688 5689 /* Copy/convert the address lists; add the primary station 5690 * address and broadcast address 5691 */ 5692 netif_addr_lock_bh(net_dev); 5693 efx_ef10_filter_uc_addr_list(efx); 5694 efx_ef10_filter_mc_addr_list(efx); 5695 netif_addr_unlock_bh(net_dev); 5696 5697 /* If VLAN filtering changes, all old filters are finally removed. 5698 * Do it in advance to avoid conflicts for unicast untagged and 5699 * VLAN 0 tagged filters. 
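	 * (That is the purpose of the NETIF_F_HW_VLAN_CTAG_FILTER check just
	 * below: if the feature bit has changed, efx_ef10_filter_remove_old()
	 * runs immediately, before the per-VLAN sync, rather than only in the
	 * final sweep.)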
5700 */ 5701 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 5702 if (table->vlan_filter != vlan_filter) { 5703 table->vlan_filter = vlan_filter; 5704 efx_ef10_filter_remove_old(efx); 5705 } 5706 5707 list_for_each_entry(vlan, &table->vlan_list, list) 5708 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5709 5710 efx_ef10_filter_remove_old(efx); 5711 table->mc_promisc_last = table->mc_promisc; 5712 } 5713 5714 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) 5715 { 5716 struct efx_ef10_filter_table *table = efx->filter_state; 5717 struct efx_ef10_filter_vlan *vlan; 5718 5719 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5720 5721 list_for_each_entry(vlan, &table->vlan_list, list) { 5722 if (vlan->vid == vid) 5723 return vlan; 5724 } 5725 5726 return NULL; 5727 } 5728 5729 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) 5730 { 5731 struct efx_ef10_filter_table *table = efx->filter_state; 5732 struct efx_ef10_filter_vlan *vlan; 5733 unsigned int i; 5734 5735 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5736 return -EINVAL; 5737 5738 vlan = efx_ef10_filter_find_vlan(efx, vid); 5739 if (WARN_ON(vlan)) { 5740 netif_err(efx, drv, efx->net_dev, 5741 "VLAN %u already added\n", vid); 5742 return -EALREADY; 5743 } 5744 5745 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 5746 if (!vlan) 5747 return -ENOMEM; 5748 5749 vlan->vid = vid; 5750 5751 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5752 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; 5753 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5754 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; 5755 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5756 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; 5757 5758 list_add_tail(&vlan->list, &table->vlan_list); 5759 5760 if (efx_dev_registered(efx)) 5761 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5762 5763 return 0; 5764 } 5765 5766 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, 5767 struct efx_ef10_filter_vlan *vlan) 5768 { 5769 unsigned int i; 5770 5771 /* See comment in efx_ef10_filter_table_remove() */ 5772 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5773 return; 5774 5775 list_del(&vlan->list); 5776 5777 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5778 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5779 vlan->uc[i]); 5780 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5781 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5782 vlan->mc[i]); 5783 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5784 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) 5785 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5786 vlan->default_filters[i]); 5787 5788 kfree(vlan); 5789 } 5790 5791 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) 5792 { 5793 struct efx_ef10_filter_vlan *vlan; 5794 5795 /* See comment in efx_ef10_filter_table_remove() */ 5796 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5797 return; 5798 5799 vlan = efx_ef10_filter_find_vlan(efx, vid); 5800 if (!vlan) { 5801 netif_err(efx, drv, efx->net_dev, 5802 "VLAN %u not found in filter state\n", vid); 5803 return; 5804 } 5805 5806 efx_ef10_filter_del_vlan_internal(efx, vlan); 5807 } 5808 5809 static int efx_ef10_set_mac_address(struct efx_nic *efx) 5810 { 5811 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 5812 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5813 bool was_enabled = efx->port_enabled; 5814 int rc; 5815 5816 efx_device_detach_sync(efx); 5817 efx_net_stop(efx->net_dev); 
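	/* With the port stopped and the device detached, push the new
	 * station address to the MC with MC_CMD_VADAPTOR_SET_MAC.  The
	 * filter table is removed first and re-probed afterwards so that
	 * the default filters are re-inserted against the new address.
	 */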
5818 5819 mutex_lock(&efx->mac_lock); 5820 down_write(&efx->filter_sem); 5821 efx_ef10_filter_table_remove(efx); 5822 5823 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 5824 efx->net_dev->dev_addr); 5825 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 5826 nic_data->vport_id); 5827 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 5828 sizeof(inbuf), NULL, 0, NULL); 5829 5830 efx_ef10_filter_table_probe(efx); 5831 up_write(&efx->filter_sem); 5832 mutex_unlock(&efx->mac_lock); 5833 5834 if (was_enabled) 5835 efx_net_open(efx->net_dev); 5836 efx_device_attach_if_not_resetting(efx); 5837 5838 #ifdef CONFIG_SFC_SRIOV 5839 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 5840 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 5841 5842 if (rc == -EPERM) { 5843 struct efx_nic *efx_pf; 5844 5845 /* Switch to PF and change MAC address on vport */ 5846 efx_pf = pci_get_drvdata(pci_dev_pf); 5847 5848 rc = efx_ef10_sriov_set_vf_mac(efx_pf, 5849 nic_data->vf_index, 5850 efx->net_dev->dev_addr); 5851 } else if (!rc) { 5852 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 5853 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 5854 unsigned int i; 5855 5856 /* MAC address successfully changed by VF (with MAC 5857 * spoofing) so update the parent PF if possible. 5858 */ 5859 for (i = 0; i < efx_pf->vf_count; ++i) { 5860 struct ef10_vf *vf = nic_data->vf + i; 5861 5862 if (vf->efx == efx) { 5863 ether_addr_copy(vf->mac, 5864 efx->net_dev->dev_addr); 5865 return 0; 5866 } 5867 } 5868 } 5869 } else 5870 #endif 5871 if (rc == -EPERM) { 5872 netif_err(efx, drv, efx->net_dev, 5873 "Cannot change MAC address; use sfboot to enable" 5874 " mac-spoofing on this interface\n"); 5875 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 5876 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 5877 * fall-back to the method of changing the MAC address on the 5878 * vport. This only applies to PFs because such versions of 5879 * MCFW do not support VFs. 5880 */ 5881 rc = efx_ef10_vport_set_mac_address(efx); 5882 } else if (rc) { 5883 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 5884 sizeof(inbuf), NULL, 0, rc); 5885 } 5886 5887 return rc; 5888 } 5889 5890 static int efx_ef10_mac_reconfigure(struct efx_nic *efx) 5891 { 5892 efx_ef10_filter_sync_rx_mode(efx); 5893 5894 return efx_mcdi_set_mac(efx); 5895 } 5896 5897 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) 5898 { 5899 efx_ef10_filter_sync_rx_mode(efx); 5900 5901 return 0; 5902 } 5903 5904 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 5905 { 5906 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 5907 5908 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 5909 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 5910 NULL, 0, NULL); 5911 } 5912 5913 /* MC BISTs follow a different poll mechanism to phy BISTs. 5914 * The BIST is done in the poll handler on the MC, and the MCDI command 5915 * will block until the BIST is done. 
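 * efx_ef10_poll_bist() below therefore issues a single MC_CMD_POLL_BIST and
 * maps the reported result to 0 on pass or -EIO on any failure.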
5916 */ 5917 static int efx_ef10_poll_bist(struct efx_nic *efx) 5918 { 5919 int rc; 5920 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 5921 size_t outlen; 5922 u32 result; 5923 5924 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 5925 outbuf, sizeof(outbuf), &outlen); 5926 if (rc != 0) 5927 return rc; 5928 5929 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 5930 return -EIO; 5931 5932 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 5933 switch (result) { 5934 case MC_CMD_POLL_BIST_PASSED: 5935 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 5936 return 0; 5937 case MC_CMD_POLL_BIST_TIMEOUT: 5938 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 5939 return -EIO; 5940 case MC_CMD_POLL_BIST_FAILED: 5941 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 5942 return -EIO; 5943 default: 5944 netif_err(efx, hw, efx->net_dev, 5945 "BIST returned unknown result %u", result); 5946 return -EIO; 5947 } 5948 } 5949 5950 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 5951 { 5952 int rc; 5953 5954 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 5955 5956 rc = efx_ef10_start_bist(efx, bist_type); 5957 if (rc != 0) 5958 return rc; 5959 5960 return efx_ef10_poll_bist(efx); 5961 } 5962 5963 static int 5964 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 5965 { 5966 int rc, rc2; 5967 5968 efx_reset_down(efx, RESET_TYPE_WORLD); 5969 5970 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 5971 NULL, 0, NULL, 0, NULL); 5972 if (rc != 0) 5973 goto out; 5974 5975 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 5976 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 5977 5978 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 5979 5980 out: 5981 if (rc == -EPERM) 5982 rc = 0; 5983 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 5984 return rc ? 
rc : rc2; 5985 } 5986 5987 #ifdef CONFIG_SFC_MTD 5988 5989 struct efx_ef10_nvram_type_info { 5990 u16 type, type_mask; 5991 u8 port; 5992 const char *name; 5993 }; 5994 5995 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 5996 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 5997 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 5998 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 5999 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 6000 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 6001 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 6002 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 6003 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 6004 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 6005 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 6006 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 6007 }; 6008 6009 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 6010 struct efx_mcdi_mtd_partition *part, 6011 unsigned int type) 6012 { 6013 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 6014 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 6015 const struct efx_ef10_nvram_type_info *info; 6016 size_t size, erase_size, outlen; 6017 bool protected; 6018 int rc; 6019 6020 for (info = efx_ef10_nvram_types; ; info++) { 6021 if (info == 6022 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) 6023 return -ENODEV; 6024 if ((type & ~info->type_mask) == info->type) 6025 break; 6026 } 6027 if (info->port != efx_port_num(efx)) 6028 return -ENODEV; 6029 6030 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 6031 if (rc) 6032 return rc; 6033 if (protected) 6034 return -ENODEV; /* hide it */ 6035 6036 part->nvram_type = type; 6037 6038 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 6039 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 6040 outbuf, sizeof(outbuf), &outlen); 6041 if (rc) 6042 return rc; 6043 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 6044 return -EIO; 6045 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 6046 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 6047 part->fw_subtype = MCDI_DWORD(outbuf, 6048 NVRAM_METADATA_OUT_SUBTYPE); 6049 6050 part->common.dev_type_name = "EF10 NVRAM manager"; 6051 part->common.type_name = info->name; 6052 6053 part->common.mtd.type = MTD_NORFLASH; 6054 part->common.mtd.flags = MTD_CAP_NORFLASH; 6055 part->common.mtd.size = size; 6056 part->common.mtd.erasesize = erase_size; 6057 6058 return 0; 6059 } 6060 6061 static int efx_ef10_mtd_probe(struct efx_nic *efx) 6062 { 6063 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6064 struct efx_mcdi_mtd_partition *parts; 6065 size_t outlen, n_parts_total, i, n_parts; 6066 unsigned int type; 6067 int rc; 6068 6069 ASSERT_RTNL(); 6070 6071 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 6072 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 6073 outbuf, sizeof(outbuf), &outlen); 6074 if (rc) 6075 return rc; 6076 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 6077 return -EIO; 6078 6079 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 6080 if (n_parts_total > 6081 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 6082 return -EIO; 6083 6084 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 6085 if (!parts) 6086 return -ENOMEM; 6087 6088 n_parts = 0; 6089 for (i = 
0; i < n_parts_total; i++) { 6090 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 6091 i); 6092 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); 6093 if (rc == 0) 6094 n_parts++; 6095 else if (rc != -ENODEV) 6096 goto fail; 6097 } 6098 6099 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 6100 fail: 6101 if (rc) 6102 kfree(parts); 6103 return rc; 6104 } 6105 6106 #endif /* CONFIG_SFC_MTD */ 6107 6108 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 6109 { 6110 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 6111 } 6112 6113 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 6114 u32 host_time) {} 6115 6116 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 6117 bool temp) 6118 { 6119 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 6120 int rc; 6121 6122 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 6123 channel->sync_events_state == SYNC_EVENTS_VALID || 6124 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 6125 return 0; 6126 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 6127 6128 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 6129 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6130 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 6131 channel->channel); 6132 6133 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6134 inbuf, sizeof(inbuf), NULL, 0, NULL); 6135 6136 if (rc != 0) 6137 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 6138 SYNC_EVENTS_DISABLED; 6139 6140 return rc; 6141 } 6142 6143 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 6144 bool temp) 6145 { 6146 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 6147 int rc; 6148 6149 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 6150 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 6151 return 0; 6152 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 6153 channel->sync_events_state = SYNC_EVENTS_DISABLED; 6154 return 0; 6155 } 6156 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 6157 SYNC_EVENTS_DISABLED; 6158 6159 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 6160 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6161 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 6162 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 6163 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 6164 channel->channel); 6165 6166 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6167 inbuf, sizeof(inbuf), NULL, 0, NULL); 6168 6169 return rc; 6170 } 6171 6172 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 6173 bool temp) 6174 { 6175 int (*set)(struct efx_channel *channel, bool temp); 6176 struct efx_channel *channel; 6177 6178 set = en ? 
6179 efx_ef10_rx_enable_timestamping : 6180 efx_ef10_rx_disable_timestamping; 6181 6182 efx_for_each_channel(channel, efx) { 6183 int rc = set(channel, temp); 6184 if (en && rc != 0) { 6185 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 6186 return rc; 6187 } 6188 } 6189 6190 return 0; 6191 } 6192 6193 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 6194 struct hwtstamp_config *init) 6195 { 6196 return -EOPNOTSUPP; 6197 } 6198 6199 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 6200 struct hwtstamp_config *init) 6201 { 6202 int rc; 6203 6204 switch (init->rx_filter) { 6205 case HWTSTAMP_FILTER_NONE: 6206 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 6207 /* if TX timestamping is still requested then leave PTP on */ 6208 return efx_ptp_change_mode(efx, 6209 init->tx_type != HWTSTAMP_TX_OFF, 0); 6210 case HWTSTAMP_FILTER_ALL: 6211 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 6212 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 6213 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 6214 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 6215 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6216 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 6217 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 6218 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6219 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 6220 case HWTSTAMP_FILTER_PTP_V2_EVENT: 6221 case HWTSTAMP_FILTER_PTP_V2_SYNC: 6222 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 6223 case HWTSTAMP_FILTER_NTP_ALL: 6224 init->rx_filter = HWTSTAMP_FILTER_ALL; 6225 rc = efx_ptp_change_mode(efx, true, 0); 6226 if (!rc) 6227 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 6228 if (rc) 6229 efx_ptp_change_mode(efx, false, 0); 6230 return rc; 6231 default: 6232 return -ERANGE; 6233 } 6234 } 6235 6236 static int efx_ef10_get_phys_port_id(struct efx_nic *efx, 6237 struct netdev_phys_item_id *ppid) 6238 { 6239 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6240 6241 if (!is_valid_ether_addr(nic_data->port_id)) 6242 return -EOPNOTSUPP; 6243 6244 ppid->id_len = ETH_ALEN; 6245 memcpy(ppid->id, nic_data->port_id, ppid->id_len); 6246 6247 return 0; 6248 } 6249 6250 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6251 { 6252 if (proto != htons(ETH_P_8021Q)) 6253 return -EINVAL; 6254 6255 return efx_ef10_add_vlan(efx, vid); 6256 } 6257 6258 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6259 { 6260 if (proto != htons(ETH_P_8021Q)) 6261 return -EINVAL; 6262 6263 return efx_ef10_del_vlan(efx, vid); 6264 } 6265 6266 /* We rely on the MCDI wiping out our TX rings if it made any changes to the 6267 * ports table, ensuring that any TSO descriptors that were made on a now- 6268 * removed tunnel port will be blown away and won't break things when we try 6269 * to transmit them using the new ports table. 
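 * efx_ef10_set_udp_tnl_ports() rebuilds the whole list on every call: each
 * tunnel entry that is still referenced is packed into the
 * MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS request, and an -EIO reply (typically an
 * MC reboot triggered by another function doing the same) only marks the
 * list dirty so that it is pushed again once the reboot completes.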
6270 */ 6271 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) 6272 { 6273 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6274 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); 6275 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); 6276 bool will_reset = false; 6277 size_t num_entries = 0; 6278 size_t inlen, outlen; 6279 size_t i; 6280 int rc; 6281 efx_dword_t flags_and_num_entries; 6282 6283 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); 6284 6285 nic_data->udp_tunnels_dirty = false; 6286 6287 if (!(nic_data->datapath_caps & 6288 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { 6289 efx_device_attach_if_not_resetting(efx); 6290 return 0; 6291 } 6292 6293 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > 6294 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 6295 6296 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6297 if (nic_data->udp_tunnels[i].count && 6298 nic_data->udp_tunnels[i].port) { 6299 efx_dword_t entry; 6300 6301 EFX_POPULATE_DWORD_2(entry, 6302 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, 6303 ntohs(nic_data->udp_tunnels[i].port), 6304 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, 6305 nic_data->udp_tunnels[i].type); 6306 *_MCDI_ARRAY_DWORD(inbuf, 6307 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, 6308 num_entries++) = entry; 6309 } 6310 } 6311 6312 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - 6313 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != 6314 EFX_WORD_1_LBN); 6315 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != 6316 EFX_WORD_1_WIDTH); 6317 EFX_POPULATE_DWORD_2(flags_and_num_entries, 6318 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 6319 !!unloading, 6320 EFX_WORD_1, num_entries); 6321 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = 6322 flags_and_num_entries; 6323 6324 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); 6325 6326 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, 6327 inbuf, inlen, outbuf, sizeof(outbuf), &outlen); 6328 if (rc == -EIO) { 6329 /* Most likely the MC rebooted due to another function also 6330 * setting its tunnel port list. Mark the tunnel port list as 6331 * dirty, so it will be pushed upon coming up from the reboot. 6332 */ 6333 nic_data->udp_tunnels_dirty = true; 6334 return 0; 6335 } 6336 6337 if (rc) { 6338 /* expected not available on unprivileged functions */ 6339 if (rc != -EPERM) 6340 netif_warn(efx, drv, efx->net_dev, 6341 "Unable to set UDP tunnel ports; rc=%d.\n", rc); 6342 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & 6343 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { 6344 netif_info(efx, drv, efx->net_dev, 6345 "Rebooting MC due to UDP tunnel port list change\n"); 6346 will_reset = true; 6347 if (unloading) 6348 /* Delay for the MC reset to complete. This will make 6349 * unloading other functions a bit smoother. This is a 6350 * race, but the other unload will work whichever way 6351 * it goes, this just avoids an unnecessary error 6352 * message. 6353 */ 6354 msleep(100); 6355 } 6356 if (!will_reset && !unloading) { 6357 /* The caller will have detached, relying on the MC reset to 6358 * trigger a re-attach. Since there won't be an MC reset, we 6359 * have to do the attach ourselves. 
6360 */ 6361 efx_device_attach_if_not_resetting(efx); 6362 } 6363 6364 return rc; 6365 } 6366 6367 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) 6368 { 6369 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6370 int rc = 0; 6371 6372 mutex_lock(&nic_data->udp_tunnels_lock); 6373 if (nic_data->udp_tunnels_dirty) { 6374 /* Make sure all TX are stopped while we modify the table, else 6375 * we might race against an efx_features_check(). 6376 */ 6377 efx_device_detach_sync(efx); 6378 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6379 } 6380 mutex_unlock(&nic_data->udp_tunnels_lock); 6381 return rc; 6382 } 6383 6384 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, 6385 __be16 port) 6386 { 6387 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6388 size_t i; 6389 6390 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6391 if (!nic_data->udp_tunnels[i].count) 6392 continue; 6393 if (nic_data->udp_tunnels[i].port == port) 6394 return &nic_data->udp_tunnels[i]; 6395 } 6396 return NULL; 6397 } 6398 6399 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, 6400 struct efx_udp_tunnel tnl) 6401 { 6402 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6403 struct efx_udp_tunnel *match; 6404 char typebuf[8]; 6405 size_t i; 6406 int rc; 6407 6408 if (!(nic_data->datapath_caps & 6409 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6410 return 0; 6411 6412 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6413 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", 6414 typebuf, ntohs(tnl.port)); 6415 6416 mutex_lock(&nic_data->udp_tunnels_lock); 6417 /* Make sure all TX are stopped while we add to the table, else we 6418 * might race against an efx_features_check(). 6419 */ 6420 efx_device_detach_sync(efx); 6421 6422 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6423 if (match != NULL) { 6424 if (match->type == tnl.type) { 6425 netif_dbg(efx, drv, efx->net_dev, 6426 "Referencing existing tunnel entry\n"); 6427 match->count++; 6428 /* No need to cause an MCDI update */ 6429 rc = 0; 6430 goto unlock_out; 6431 } 6432 efx_get_udp_tunnel_type_name(match->type, 6433 typebuf, sizeof(typebuf)); 6434 netif_dbg(efx, drv, efx->net_dev, 6435 "UDP port %d is already in use by %s\n", 6436 ntohs(tnl.port), typebuf); 6437 rc = -EEXIST; 6438 goto unlock_out; 6439 } 6440 6441 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) 6442 if (!nic_data->udp_tunnels[i].count) { 6443 nic_data->udp_tunnels[i] = tnl; 6444 nic_data->udp_tunnels[i].count = 1; 6445 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6446 goto unlock_out; 6447 } 6448 6449 netif_dbg(efx, drv, efx->net_dev, 6450 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", 6451 typebuf, ntohs(tnl.port)); 6452 6453 rc = -ENOMEM; 6454 6455 unlock_out: 6456 mutex_unlock(&nic_data->udp_tunnels_lock); 6457 return rc; 6458 } 6459 6460 /* Called under the TX lock with the TX queue running, hence no-one can be 6461 * in the middle of updating the UDP tunnels table. However, they could 6462 * have tried and failed the MCDI, in which case they'll have set the dirty 6463 * flag before dropping their locks. 
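 * efx_ef10_udp_tnl_has_port() therefore consults only the software table and
 * reports no offload at all while the dirty flag is set.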
6464 */ 6465 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) 6466 { 6467 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6468 6469 if (!(nic_data->datapath_caps & 6470 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6471 return false; 6472 6473 if (nic_data->udp_tunnels_dirty) 6474 /* SW table may not match HW state, so just assume we can't 6475 * use any UDP tunnel offloads. 6476 */ 6477 return false; 6478 6479 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; 6480 } 6481 6482 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, 6483 struct efx_udp_tunnel tnl) 6484 { 6485 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6486 struct efx_udp_tunnel *match; 6487 char typebuf[8]; 6488 int rc; 6489 6490 if (!(nic_data->datapath_caps & 6491 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6492 return 0; 6493 6494 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6495 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", 6496 typebuf, ntohs(tnl.port)); 6497 6498 mutex_lock(&nic_data->udp_tunnels_lock); 6499 /* Make sure all TX are stopped while we remove from the table, else we 6500 * might race against an efx_features_check(). 6501 */ 6502 efx_device_detach_sync(efx); 6503 6504 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6505 if (match != NULL) { 6506 if (match->type == tnl.type) { 6507 if (--match->count) { 6508 /* Port is still in use, so nothing to do */ 6509 netif_dbg(efx, drv, efx->net_dev, 6510 "UDP tunnel port %d remains active\n", 6511 ntohs(tnl.port)); 6512 rc = 0; 6513 goto out_unlock; 6514 } 6515 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6516 goto out_unlock; 6517 } 6518 efx_get_udp_tunnel_type_name(match->type, 6519 typebuf, sizeof(typebuf)); 6520 netif_warn(efx, drv, efx->net_dev, 6521 "UDP port %d is actually in use by %s, not removing\n", 6522 ntohs(tnl.port), typebuf); 6523 } 6524 rc = -ENOENT; 6525 6526 out_unlock: 6527 mutex_unlock(&nic_data->udp_tunnels_lock); 6528 return rc; 6529 } 6530 6531 #define EF10_OFFLOAD_FEATURES \ 6532 (NETIF_F_IP_CSUM | \ 6533 NETIF_F_HW_VLAN_CTAG_FILTER | \ 6534 NETIF_F_IPV6_CSUM | \ 6535 NETIF_F_RXHASH | \ 6536 NETIF_F_NTUPLE) 6537 6538 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 6539 .is_vf = true, 6540 .mem_bar = efx_ef10_vf_mem_bar, 6541 .mem_map_size = efx_ef10_mem_map_size, 6542 .probe = efx_ef10_probe_vf, 6543 .remove = efx_ef10_remove, 6544 .dimension_resources = efx_ef10_dimension_resources, 6545 .init = efx_ef10_init_nic, 6546 .fini = efx_port_dummy_op_void, 6547 .map_reset_reason = efx_ef10_map_reset_reason, 6548 .map_reset_flags = efx_ef10_map_reset_flags, 6549 .reset = efx_ef10_reset, 6550 .probe_port = efx_mcdi_port_probe, 6551 .remove_port = efx_mcdi_port_remove, 6552 .fini_dmaq = efx_ef10_fini_dmaq, 6553 .prepare_flr = efx_ef10_prepare_flr, 6554 .finish_flr = efx_port_dummy_op_void, 6555 .describe_stats = efx_ef10_describe_stats, 6556 .update_stats = efx_ef10_update_stats_vf, 6557 .start_stats = efx_port_dummy_op_void, 6558 .pull_stats = efx_port_dummy_op_void, 6559 .stop_stats = efx_port_dummy_op_void, 6560 .set_id_led = efx_mcdi_set_id_led, 6561 .push_irq_moderation = efx_ef10_push_irq_moderation, 6562 .reconfigure_mac = efx_ef10_mac_reconfigure_vf, 6563 .check_mac_fault = efx_mcdi_mac_check_fault, 6564 .reconfigure_port = efx_mcdi_port_reconfigure, 6565 .get_wol = efx_ef10_get_wol_vf, 6566 .set_wol = efx_ef10_set_wol_vf, 6567 .resume_wol = efx_port_dummy_op_void, 6568 .mcdi_request = efx_ef10_mcdi_request, 6569 
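	/* The MCDI transport hooks (request/response/reboot handling) are the
	 * same implementations used by the PF variant, efx_hunt_a0_nic_type,
	 * defined at the end of this file.
	 */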
.mcdi_poll_response = efx_ef10_mcdi_poll_response, 6570 .mcdi_read_response = efx_ef10_mcdi_read_response, 6571 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6572 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6573 .irq_enable_master = efx_port_dummy_op_void, 6574 .irq_test_generate = efx_ef10_irq_test_generate, 6575 .irq_disable_non_ev = efx_port_dummy_op_void, 6576 .irq_handle_msi = efx_ef10_msi_interrupt, 6577 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6578 .tx_probe = efx_ef10_tx_probe, 6579 .tx_init = efx_ef10_tx_init, 6580 .tx_remove = efx_ef10_tx_remove, 6581 .tx_write = efx_ef10_tx_write, 6582 .tx_limit_len = efx_ef10_tx_limit_len, 6583 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, 6584 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6585 .rx_probe = efx_ef10_rx_probe, 6586 .rx_init = efx_ef10_rx_init, 6587 .rx_remove = efx_ef10_rx_remove, 6588 .rx_write = efx_ef10_rx_write, 6589 .rx_defer_refill = efx_ef10_rx_defer_refill, 6590 .ev_probe = efx_ef10_ev_probe, 6591 .ev_init = efx_ef10_ev_init, 6592 .ev_fini = efx_ef10_ev_fini, 6593 .ev_remove = efx_ef10_ev_remove, 6594 .ev_process = efx_ef10_ev_process, 6595 .ev_read_ack = efx_ef10_ev_read_ack, 6596 .ev_test_generate = efx_ef10_ev_test_generate, 6597 .filter_table_probe = efx_ef10_filter_table_probe, 6598 .filter_table_restore = efx_ef10_filter_table_restore, 6599 .filter_table_remove = efx_ef10_filter_table_remove, 6600 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6601 .filter_insert = efx_ef10_filter_insert, 6602 .filter_remove_safe = efx_ef10_filter_remove_safe, 6603 .filter_get_safe = efx_ef10_filter_get_safe, 6604 .filter_clear_rx = efx_ef10_filter_clear_rx, 6605 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6606 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6607 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6608 #ifdef CONFIG_RFS_ACCEL 6609 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 6610 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6611 #endif 6612 #ifdef CONFIG_SFC_MTD 6613 .mtd_probe = efx_port_dummy_op_int, 6614 #endif 6615 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 6616 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 6617 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6618 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6619 #ifdef CONFIG_SFC_SRIOV 6620 .vswitching_probe = efx_ef10_vswitching_probe_vf, 6621 .vswitching_restore = efx_ef10_vswitching_restore_vf, 6622 .vswitching_remove = efx_ef10_vswitching_remove_vf, 6623 #endif 6624 .get_mac_address = efx_ef10_get_mac_address_vf, 6625 .set_mac_address = efx_ef10_set_mac_address, 6626 6627 .get_phys_port_id = efx_ef10_get_phys_port_id, 6628 .revision = EFX_REV_HUNT_A0, 6629 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6630 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6631 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6632 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6633 .can_rx_scatter = true, 6634 .always_rx_scatter = true, 6635 .min_interrupt_mode = EFX_INT_MODE_MSIX, 6636 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6637 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6638 .offload_features = EF10_OFFLOAD_FEATURES, 6639 .mcdi_max_ver = 2, 6640 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6641 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6642 1 << HWTSTAMP_FILTER_ALL, 6643 .rx_hash_key_size = 40, 6644 }; 6645 6646 const struct efx_nic_type efx_hunt_a0_nic_type = { 6647 .is_vf = false, 6648 .mem_bar = efx_ef10_pf_mem_bar, 6649 .mem_map_size = 
efx_ef10_mem_map_size, 6650 .probe = efx_ef10_probe_pf, 6651 .remove = efx_ef10_remove, 6652 .dimension_resources = efx_ef10_dimension_resources, 6653 .init = efx_ef10_init_nic, 6654 .fini = efx_port_dummy_op_void, 6655 .map_reset_reason = efx_ef10_map_reset_reason, 6656 .map_reset_flags = efx_ef10_map_reset_flags, 6657 .reset = efx_ef10_reset, 6658 .probe_port = efx_mcdi_port_probe, 6659 .remove_port = efx_mcdi_port_remove, 6660 .fini_dmaq = efx_ef10_fini_dmaq, 6661 .prepare_flr = efx_ef10_prepare_flr, 6662 .finish_flr = efx_port_dummy_op_void, 6663 .describe_stats = efx_ef10_describe_stats, 6664 .update_stats = efx_ef10_update_stats_pf, 6665 .start_stats = efx_mcdi_mac_start_stats, 6666 .pull_stats = efx_mcdi_mac_pull_stats, 6667 .stop_stats = efx_mcdi_mac_stop_stats, 6668 .set_id_led = efx_mcdi_set_id_led, 6669 .push_irq_moderation = efx_ef10_push_irq_moderation, 6670 .reconfigure_mac = efx_ef10_mac_reconfigure, 6671 .check_mac_fault = efx_mcdi_mac_check_fault, 6672 .reconfigure_port = efx_mcdi_port_reconfigure, 6673 .get_wol = efx_ef10_get_wol, 6674 .set_wol = efx_ef10_set_wol, 6675 .resume_wol = efx_port_dummy_op_void, 6676 .test_chip = efx_ef10_test_chip, 6677 .test_nvram = efx_mcdi_nvram_test_all, 6678 .mcdi_request = efx_ef10_mcdi_request, 6679 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 6680 .mcdi_read_response = efx_ef10_mcdi_read_response, 6681 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6682 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6683 .irq_enable_master = efx_port_dummy_op_void, 6684 .irq_test_generate = efx_ef10_irq_test_generate, 6685 .irq_disable_non_ev = efx_port_dummy_op_void, 6686 .irq_handle_msi = efx_ef10_msi_interrupt, 6687 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6688 .tx_probe = efx_ef10_tx_probe, 6689 .tx_init = efx_ef10_tx_init, 6690 .tx_remove = efx_ef10_tx_remove, 6691 .tx_write = efx_ef10_tx_write, 6692 .tx_limit_len = efx_ef10_tx_limit_len, 6693 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, 6694 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6695 .rx_probe = efx_ef10_rx_probe, 6696 .rx_init = efx_ef10_rx_init, 6697 .rx_remove = efx_ef10_rx_remove, 6698 .rx_write = efx_ef10_rx_write, 6699 .rx_defer_refill = efx_ef10_rx_defer_refill, 6700 .ev_probe = efx_ef10_ev_probe, 6701 .ev_init = efx_ef10_ev_init, 6702 .ev_fini = efx_ef10_ev_fini, 6703 .ev_remove = efx_ef10_ev_remove, 6704 .ev_process = efx_ef10_ev_process, 6705 .ev_read_ack = efx_ef10_ev_read_ack, 6706 .ev_test_generate = efx_ef10_ev_test_generate, 6707 .filter_table_probe = efx_ef10_filter_table_probe, 6708 .filter_table_restore = efx_ef10_filter_table_restore, 6709 .filter_table_remove = efx_ef10_filter_table_remove, 6710 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6711 .filter_insert = efx_ef10_filter_insert, 6712 .filter_remove_safe = efx_ef10_filter_remove_safe, 6713 .filter_get_safe = efx_ef10_filter_get_safe, 6714 .filter_clear_rx = efx_ef10_filter_clear_rx, 6715 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6716 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6717 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6718 #ifdef CONFIG_RFS_ACCEL 6719 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 6720 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6721 #endif 6722 #ifdef CONFIG_SFC_MTD 6723 .mtd_probe = efx_ef10_mtd_probe, 6724 .mtd_rename = efx_mcdi_mtd_rename, 6725 .mtd_read = efx_mcdi_mtd_read, 6726 .mtd_erase = efx_mcdi_mtd_erase, 6727 .mtd_write = efx_mcdi_mtd_write, 6728 .mtd_sync = efx_mcdi_mtd_sync, 
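	/* Unlike the VF variant above, which wires .mtd_probe to
	 * efx_port_dummy_op_int, the PF exposes the full set of MTD callbacks
	 * so that the NVRAM partitions enumerated by efx_ef10_mtd_probe() can
	 * be read, erased and written via MCDI.
	 */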
6729 #endif 6730 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 6731 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 6732 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 6733 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6734 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6735 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, 6736 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, 6737 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, 6738 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, 6739 #ifdef CONFIG_SFC_SRIOV 6740 .sriov_configure = efx_ef10_sriov_configure, 6741 .sriov_init = efx_ef10_sriov_init, 6742 .sriov_fini = efx_ef10_sriov_fini, 6743 .sriov_wanted = efx_ef10_sriov_wanted, 6744 .sriov_reset = efx_ef10_sriov_reset, 6745 .sriov_flr = efx_ef10_sriov_flr, 6746 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 6747 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 6748 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 6749 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 6750 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 6751 .vswitching_probe = efx_ef10_vswitching_probe_pf, 6752 .vswitching_restore = efx_ef10_vswitching_restore_pf, 6753 .vswitching_remove = efx_ef10_vswitching_remove_pf, 6754 #endif 6755 .get_mac_address = efx_ef10_get_mac_address_pf, 6756 .set_mac_address = efx_ef10_set_mac_address, 6757 .tso_versions = efx_ef10_tso_versions, 6758 6759 .get_phys_port_id = efx_ef10_get_phys_port_id, 6760 .revision = EFX_REV_HUNT_A0, 6761 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6762 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6763 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6764 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6765 .can_rx_scatter = true, 6766 .always_rx_scatter = true, 6767 .option_descriptors = true, 6768 .min_interrupt_mode = EFX_INT_MODE_LEGACY, 6769 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6770 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6771 .offload_features = EF10_OFFLOAD_FEATURES, 6772 .mcdi_max_ver = 2, 6773 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6774 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6775 1 << HWTSTAMP_FILTER_ALL, 6776 .rx_hash_key_size = 40, 6777 }; 6778
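/* Illustrative sketch only, not part of this file: the two nic_type structs
 * exported above are bound to PCI device IDs elsewhere in the driver via the
 * usual pci_device_id table.  The device IDs shown here are assumptions used
 * purely as an example of how a PF and its VF pick up the two variants:
 *
 *	{ PCI_VDEVICE(SOLARFLARE, 0x0903),	// SFC9120 PF (assumed ID)
 *	  .driver_data = (kernel_ulong_t)&efx_hunt_a0_nic_type },
 *	{ PCI_VDEVICE(SOLARFLARE, 0x1903),	// SFC9120 VF (assumed ID)
 *	  .driver_data = (kernel_ulong_t)&efx_hunt_a0_vf_nic_type },
 */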