1 /**************************************************************************** 2 * Driver for Solarflare network controllers and boards 3 * Copyright 2012-2013 Solarflare Communications Inc. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 as published 7 * by the Free Software Foundation, incorporated herein by reference. 8 */ 9 10 #include "net_driver.h" 11 #include "ef10_regs.h" 12 #include "io.h" 13 #include "mcdi.h" 14 #include "mcdi_pcol.h" 15 #include "nic.h" 16 #include "workarounds.h" 17 #include "selftest.h" 18 #include "ef10_sriov.h" 19 #include <linux/in.h> 20 #include <linux/jhash.h> 21 #include <linux/wait.h> 22 #include <linux/workqueue.h> 23 24 /* Hardware control for EF10 architecture including 'Huntington'. */ 25 26 #define EFX_EF10_DRVGEN_EV 7 27 enum { 28 EFX_EF10_TEST = 1, 29 EFX_EF10_REFILL, 30 }; 31 32 /* The reserved RSS context value */ 33 #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff 34 /* The maximum size of a shared RSS context */ 35 /* TODO: this should really be from the mcdi protocol export */ 36 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL 37 38 /* The filter table(s) are managed by firmware and we have write-only 39 * access. When removing filters we must identify them to the 40 * firmware by a 64-bit handle, but this is too wide for Linux kernel 41 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to 42 * be able to tell in advance whether a requested insertion will 43 * replace an existing filter. Therefore we maintain a software hash 44 * table, which should be at least as large as the hardware hash 45 * table. 46 * 47 * Huntington has a single 8K filter table shared between all filter 48 * types and both ports. 49 */ 50 #define HUNT_FILTER_TBL_ROWS 8192 51 52 #define EFX_EF10_FILTER_ID_INVALID 0xffff 53 54 #define EFX_EF10_FILTER_DEV_UC_MAX 32 55 #define EFX_EF10_FILTER_DEV_MC_MAX 256 56 57 /* VLAN list entry */ 58 struct efx_ef10_vlan { 59 struct list_head list; 60 u16 vid; 61 }; 62 63 enum efx_ef10_default_filters { 64 EFX_EF10_BCAST, 65 EFX_EF10_UCDEF, 66 EFX_EF10_MCDEF, 67 EFX_EF10_VXLAN4_UCDEF, 68 EFX_EF10_VXLAN4_MCDEF, 69 EFX_EF10_VXLAN6_UCDEF, 70 EFX_EF10_VXLAN6_MCDEF, 71 EFX_EF10_NVGRE4_UCDEF, 72 EFX_EF10_NVGRE4_MCDEF, 73 EFX_EF10_NVGRE6_UCDEF, 74 EFX_EF10_NVGRE6_MCDEF, 75 EFX_EF10_GENEVE4_UCDEF, 76 EFX_EF10_GENEVE4_MCDEF, 77 EFX_EF10_GENEVE6_UCDEF, 78 EFX_EF10_GENEVE6_MCDEF, 79 80 EFX_EF10_NUM_DEFAULT_FILTERS 81 }; 82 83 /* Per-VLAN filters information */ 84 struct efx_ef10_filter_vlan { 85 struct list_head list; 86 u16 vid; 87 u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; 88 u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; 89 u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS]; 90 }; 91 92 struct efx_ef10_dev_addr { 93 u8 addr[ETH_ALEN]; 94 }; 95 96 struct efx_ef10_filter_table { 97 /* The MCDI match masks supported by this fw & hw, in order of priority */ 98 u32 rx_match_mcdi_flags[ 99 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2]; 100 unsigned int rx_match_count; 101 102 struct { 103 unsigned long spec; /* pointer to spec plus flag bits */ 104 /* BUSY flag indicates that an update is in progress. AUTO_OLD is 105 * used to mark and sweep MAC filters for the device address lists. 
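 * (Entries still marked AUTO_OLD once the address lists have been re-programmed are removed.)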
106 */ 107 #define EFX_EF10_FILTER_FLAG_BUSY 1UL 108 #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL 109 #define EFX_EF10_FILTER_FLAGS 3UL 110 u64 handle; /* firmware handle */ 111 } *entry; 112 wait_queue_head_t waitq; 113 /* Shadow of net_device address lists, guarded by mac_lock */ 114 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; 115 struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; 116 int dev_uc_count; 117 int dev_mc_count; 118 bool uc_promisc; 119 bool mc_promisc; 120 /* Whether in multicast promiscuous mode when last changed */ 121 bool mc_promisc_last; 122 bool mc_overflow; /* Too many MC addrs; should always imply mc_promisc */ 123 bool vlan_filter; 124 struct list_head vlan_list; 125 }; 126 127 /* An arbitrary search limit for the software hash table */ 128 #define EFX_EF10_FILTER_SEARCH_LIMIT 200 129 130 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); 131 static void efx_ef10_filter_table_remove(struct efx_nic *efx); 132 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid); 133 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, 134 struct efx_ef10_filter_vlan *vlan); 135 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); 136 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); 137 138 static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id) 139 { 140 WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID); 141 return filter_id & (HUNT_FILTER_TBL_ROWS - 1); 142 } 143 144 static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id) 145 { 146 return filter_id / (HUNT_FILTER_TBL_ROWS * 2); 147 } 148 149 static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx) 150 { 151 return pri * HUNT_FILTER_TBL_ROWS * 2 + idx; 152 } 153 154 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) 155 { 156 efx_dword_t reg; 157 158 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS); 159 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? 160 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; 161 } 162 163 /* On all EF10s up to and including SFC9220 (Medford1), all PFs use BAR 0 for 164 * I/O space and BAR 2(&3) for memory. On SFC9250 (Medford2), there is no I/O 165 * bar; PFs use BAR 0/1 for memory.
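 * (efx_ef10_pf_mem_bar() below keys the choice off the PCI device ID.)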
166 */ 167 static unsigned int efx_ef10_pf_mem_bar(struct efx_nic *efx) 168 { 169 switch (efx->pci_dev->device) { 170 case 0x0b03: /* SFC9250 PF */ 171 return 0; 172 default: 173 return 2; 174 } 175 } 176 177 /* All VFs use BAR 0/1 for memory */ 178 static unsigned int efx_ef10_vf_mem_bar(struct efx_nic *efx) 179 { 180 return 0; 181 } 182 183 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx) 184 { 185 int bar; 186 187 bar = efx->type->mem_bar(efx); 188 return resource_size(&efx->pci_dev->resource[bar]); 189 } 190 191 static bool efx_ef10_is_vf(struct efx_nic *efx) 192 { 193 return efx->type->is_vf; 194 } 195 196 static int efx_ef10_get_pf_index(struct efx_nic *efx) 197 { 198 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 199 struct efx_ef10_nic_data *nic_data = efx->nic_data; 200 size_t outlen; 201 int rc; 202 203 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 204 sizeof(outbuf), &outlen); 205 if (rc) 206 return rc; 207 if (outlen < sizeof(outbuf)) 208 return -EIO; 209 210 nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF); 211 return 0; 212 } 213 214 #ifdef CONFIG_SFC_SRIOV 215 static int efx_ef10_get_vf_index(struct efx_nic *efx) 216 { 217 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 218 struct efx_ef10_nic_data *nic_data = efx->nic_data; 219 size_t outlen; 220 int rc; 221 222 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf, 223 sizeof(outbuf), &outlen); 224 if (rc) 225 return rc; 226 if (outlen < sizeof(outbuf)) 227 return -EIO; 228 229 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF); 230 return 0; 231 } 232 #endif 233 234 static int efx_ef10_init_datapath_caps(struct efx_nic *efx) 235 { 236 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); 237 struct efx_ef10_nic_data *nic_data = efx->nic_data; 238 size_t outlen; 239 int rc; 240 241 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); 242 243 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, 244 outbuf, sizeof(outbuf), &outlen); 245 if (rc) 246 return rc; 247 if (outlen < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 248 netif_err(efx, drv, efx->net_dev, 249 "unable to read datapath firmware capabilities\n"); 250 return -EIO; 251 } 252 253 nic_data->datapath_caps = 254 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1); 255 256 if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) { 257 nic_data->datapath_caps2 = MCDI_DWORD(outbuf, 258 GET_CAPABILITIES_V2_OUT_FLAGS2); 259 nic_data->piobuf_size = MCDI_WORD(outbuf, 260 GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF); 261 } else { 262 nic_data->datapath_caps2 = 0; 263 nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE; 264 } 265 266 /* record the DPCPU firmware IDs to determine VEB vswitching support. 
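 * (The RX and TX DPCPU firmware IDs identify the datapath firmware variant in use.)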
267 */ 268 nic_data->rx_dpcpu_fw_id = 269 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 270 nic_data->tx_dpcpu_fw_id = 271 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 272 273 if (!(nic_data->datapath_caps & 274 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) { 275 netif_err(efx, probe, efx->net_dev, 276 "current firmware does not support an RX prefix\n"); 277 return -ENODEV; 278 } 279 280 if (outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { 281 u8 vi_window_mode = MCDI_BYTE(outbuf, 282 GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); 283 284 switch (vi_window_mode) { 285 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: 286 efx->vi_stride = 8192; 287 break; 288 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: 289 efx->vi_stride = 16384; 290 break; 291 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: 292 efx->vi_stride = 65536; 293 break; 294 default: 295 netif_err(efx, probe, efx->net_dev, 296 "Unrecognised VI window mode %d\n", 297 vi_window_mode); 298 return -EIO; 299 } 300 netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n", 301 efx->vi_stride); 302 } else { 303 /* keep default VI stride */ 304 netif_dbg(efx, probe, efx->net_dev, 305 "firmware did not report VI window mode, assuming vi_stride = %u\n", 306 efx->vi_stride); 307 } 308 309 if (outlen >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { 310 efx->num_mac_stats = MCDI_WORD(outbuf, 311 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); 312 netif_dbg(efx, probe, efx->net_dev, 313 "firmware reports num_mac_stats = %u\n", 314 efx->num_mac_stats); 315 } else { 316 /* leave num_mac_stats as the default value, MC_CMD_MAC_NSTATS */ 317 netif_dbg(efx, probe, efx->net_dev, 318 "firmware did not report num_mac_stats, assuming %u\n", 319 efx->num_mac_stats); 320 } 321 322 return 0; 323 } 324 325 static void efx_ef10_read_licensed_features(struct efx_nic *efx) 326 { 327 MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN); 328 MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN); 329 struct efx_ef10_nic_data *nic_data = efx->nic_data; 330 size_t outlen; 331 int rc; 332 333 MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP, 334 MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE); 335 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf), 336 outbuf, sizeof(outbuf), &outlen); 337 if (rc || (outlen < MC_CMD_LICENSING_V3_OUT_LEN)) 338 return; 339 340 nic_data->licensed_features = MCDI_QWORD(outbuf, 341 LICENSING_V3_OUT_LICENSED_FEATURES); 342 } 343 344 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx) 345 { 346 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN); 347 int rc; 348 349 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0, 350 outbuf, sizeof(outbuf), NULL); 351 if (rc) 352 return rc; 353 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ); 354 return rc > 0 ? rc : -ERANGE; 355 } 356 357 static int efx_ef10_get_timer_workarounds(struct efx_nic *efx) 358 { 359 struct efx_ef10_nic_data *nic_data = efx->nic_data; 360 unsigned int implemented; 361 unsigned int enabled; 362 int rc; 363 364 nic_data->workaround_35388 = false; 365 nic_data->workaround_61265 = false; 366 367 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 368 369 if (rc == -ENOSYS) { 370 /* Firmware without GET_WORKAROUNDS - not a problem. */ 371 rc = 0; 372 } else if (rc == 0) { 373 /* Bug61265 workaround is always enabled if implemented. 
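 * So, unlike bug 35388 below, we never need to enable it ourselves.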
*/ 374 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG61265) 375 nic_data->workaround_61265 = true; 376 377 if (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 378 nic_data->workaround_35388 = true; 379 } else if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) { 380 /* Workaround is implemented but not enabled. 381 * Try to enable it. 382 */ 383 rc = efx_mcdi_set_workaround(efx, 384 MC_CMD_WORKAROUND_BUG35388, 385 true, NULL); 386 if (rc == 0) 387 nic_data->workaround_35388 = true; 388 /* If we failed to set the workaround just carry on. */ 389 rc = 0; 390 } 391 } 392 393 netif_dbg(efx, probe, efx->net_dev, 394 "workaround for bug 35388 is %sabled\n", 395 nic_data->workaround_35388 ? "en" : "dis"); 396 netif_dbg(efx, probe, efx->net_dev, 397 "workaround for bug 61265 is %sabled\n", 398 nic_data->workaround_61265 ? "en" : "dis"); 399 400 return rc; 401 } 402 403 static void efx_ef10_process_timer_config(struct efx_nic *efx, 404 const efx_dword_t *data) 405 { 406 unsigned int max_count; 407 408 if (EFX_EF10_WORKAROUND_61265(efx)) { 409 efx->timer_quantum_ns = MCDI_DWORD(data, 410 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS); 411 efx->timer_max_ns = MCDI_DWORD(data, 412 GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS); 413 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 414 efx->timer_quantum_ns = MCDI_DWORD(data, 415 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT); 416 max_count = MCDI_DWORD(data, 417 GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT); 418 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 419 } else { 420 efx->timer_quantum_ns = MCDI_DWORD(data, 421 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT); 422 max_count = MCDI_DWORD(data, 423 GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT); 424 efx->timer_max_ns = max_count * efx->timer_quantum_ns; 425 } 426 427 netif_dbg(efx, probe, efx->net_dev, 428 "got timer properties from MC: quantum %u ns; max %u ns\n", 429 efx->timer_quantum_ns, efx->timer_max_ns); 430 } 431 432 static int efx_ef10_get_timer_config(struct efx_nic *efx) 433 { 434 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN); 435 int rc; 436 437 rc = efx_ef10_get_timer_workarounds(efx); 438 if (rc) 439 return rc; 440 441 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, NULL, 0, 442 outbuf, sizeof(outbuf), NULL); 443 444 if (rc == 0) { 445 efx_ef10_process_timer_config(efx, outbuf); 446 } else if (rc == -ENOSYS || rc == -EPERM) { 447 /* Not available - fall back to Huntington defaults. 
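 * (A 1536-cycle timer quantum derived from the system clock frequency.)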
*/ 448 unsigned int quantum; 449 450 rc = efx_ef10_get_sysclk_freq(efx); 451 if (rc < 0) 452 return rc; 453 454 quantum = 1536000 / rc; /* 1536 cycles */ 455 efx->timer_quantum_ns = quantum; 456 efx->timer_max_ns = efx->type->timer_period_max * quantum; 457 rc = 0; 458 } else { 459 efx_mcdi_display_error(efx, MC_CMD_GET_EVQ_TMR_PROPERTIES, 460 MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN, 461 NULL, 0, rc); 462 } 463 464 return rc; 465 } 466 467 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address) 468 { 469 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); 470 size_t outlen; 471 int rc; 472 473 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); 474 475 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, 476 outbuf, sizeof(outbuf), &outlen); 477 if (rc) 478 return rc; 479 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 480 return -EIO; 481 482 ether_addr_copy(mac_address, 483 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); 484 return 0; 485 } 486 487 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address) 488 { 489 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN); 490 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); 491 size_t outlen; 492 int num_addrs, rc; 493 494 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, 495 EVB_PORT_ID_ASSIGNED); 496 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, 497 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 498 499 if (rc) 500 return rc; 501 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) 502 return -EIO; 503 504 num_addrs = MCDI_DWORD(outbuf, 505 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT); 506 507 WARN_ON(num_addrs != 1); 508 509 ether_addr_copy(mac_address, 510 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR)); 511 512 return 0; 513 } 514 515 static ssize_t efx_ef10_show_link_control_flag(struct device *dev, 516 struct device_attribute *attr, 517 char *buf) 518 { 519 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 520 521 return sprintf(buf, "%d\n", 522 ((efx->mcdi->fn_flags) & 523 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 524 ? 1 : 0); 525 } 526 527 static ssize_t efx_ef10_show_primary_flag(struct device *dev, 528 struct device_attribute *attr, 529 char *buf) 530 { 531 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); 532 533 return sprintf(buf, "%d\n", 534 ((efx->mcdi->fn_flags) & 535 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) 536 ? 1 : 0); 537 } 538 539 static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid) 540 { 541 struct efx_ef10_nic_data *nic_data = efx->nic_data; 542 struct efx_ef10_vlan *vlan; 543 544 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 545 546 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 547 if (vlan->vid == vid) 548 return vlan; 549 } 550 551 return NULL; 552 } 553 554 static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid) 555 { 556 struct efx_ef10_nic_data *nic_data = efx->nic_data; 557 struct efx_ef10_vlan *vlan; 558 int rc; 559 560 mutex_lock(&nic_data->vlan_lock); 561 562 vlan = efx_ef10_find_vlan(efx, vid); 563 if (vlan) { 564 /* We add VID 0 on init. 8021q adds it on module init 565 * for all interfaces with the VLAN filtering feature.
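 * In that case finding the VID already present is expected and not an error.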
566 */ 567 if (vid == 0) 568 goto done_unlock; 569 netif_warn(efx, drv, efx->net_dev, 570 "VLAN %u already added\n", vid); 571 rc = -EALREADY; 572 goto fail_exist; 573 } 574 575 rc = -ENOMEM; 576 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 577 if (!vlan) 578 goto fail_alloc; 579 580 vlan->vid = vid; 581 582 list_add_tail(&vlan->list, &nic_data->vlan_list); 583 584 if (efx->filter_state) { 585 mutex_lock(&efx->mac_lock); 586 down_write(&efx->filter_sem); 587 rc = efx_ef10_filter_add_vlan(efx, vlan->vid); 588 up_write(&efx->filter_sem); 589 mutex_unlock(&efx->mac_lock); 590 if (rc) 591 goto fail_filter_add_vlan; 592 } 593 594 done_unlock: 595 mutex_unlock(&nic_data->vlan_lock); 596 return 0; 597 598 fail_filter_add_vlan: 599 list_del(&vlan->list); 600 kfree(vlan); 601 fail_alloc: 602 fail_exist: 603 mutex_unlock(&nic_data->vlan_lock); 604 return rc; 605 } 606 607 static void efx_ef10_del_vlan_internal(struct efx_nic *efx, 608 struct efx_ef10_vlan *vlan) 609 { 610 struct efx_ef10_nic_data *nic_data = efx->nic_data; 611 612 WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); 613 614 if (efx->filter_state) { 615 down_write(&efx->filter_sem); 616 efx_ef10_filter_del_vlan(efx, vlan->vid); 617 up_write(&efx->filter_sem); 618 } 619 620 list_del(&vlan->list); 621 kfree(vlan); 622 } 623 624 static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid) 625 { 626 struct efx_ef10_nic_data *nic_data = efx->nic_data; 627 struct efx_ef10_vlan *vlan; 628 int rc = 0; 629 630 /* 8021q removes VID 0 on module unload for all interfaces 631 * with VLAN filtering feature. We need to keep it to receive 632 * untagged traffic. 633 */ 634 if (vid == 0) 635 return 0; 636 637 mutex_lock(&nic_data->vlan_lock); 638 639 vlan = efx_ef10_find_vlan(efx, vid); 640 if (!vlan) { 641 netif_err(efx, drv, efx->net_dev, 642 "VLAN %u to be deleted not found\n", vid); 643 rc = -ENOENT; 644 } else { 645 efx_ef10_del_vlan_internal(efx, vlan); 646 } 647 648 mutex_unlock(&nic_data->vlan_lock); 649 650 return rc; 651 } 652 653 static void efx_ef10_cleanup_vlans(struct efx_nic *efx) 654 { 655 struct efx_ef10_nic_data *nic_data = efx->nic_data; 656 struct efx_ef10_vlan *vlan, *next_vlan; 657 658 mutex_lock(&nic_data->vlan_lock); 659 list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list) 660 efx_ef10_del_vlan_internal(efx, vlan); 661 mutex_unlock(&nic_data->vlan_lock); 662 } 663 664 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, 665 NULL); 666 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); 667 668 static int efx_ef10_probe(struct efx_nic *efx) 669 { 670 struct efx_ef10_nic_data *nic_data; 671 int i, rc; 672 673 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 674 if (!nic_data) 675 return -ENOMEM; 676 efx->nic_data = nic_data; 677 678 /* we assume later that we can copy from this buffer in dwords */ 679 BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); 680 681 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, 682 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL); 683 if (rc) 684 goto fail1; 685 686 /* Get the MC's warm boot count. In case it's rebooting right 687 * now, be prepared to retry. 
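 * (Up to five attempts below, sleeping one second between them.)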
688 */ 689 i = 0; 690 for (;;) { 691 rc = efx_ef10_get_warm_boot_count(efx); 692 if (rc >= 0) 693 break; 694 if (++i == 5) 695 goto fail2; 696 ssleep(1); 697 } 698 nic_data->warm_boot_count = rc; 699 700 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 701 702 nic_data->vport_id = EVB_PORT_ID_ASSIGNED; 703 704 /* In case we're recovering from a crash (kexec), we want to 705 * cancel any outstanding request by the previous user of this 706 * function. We send a special message using the least 707 * significant bits of the 'high' (doorbell) register. 708 */ 709 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD); 710 711 rc = efx_mcdi_init(efx); 712 if (rc) 713 goto fail2; 714 715 mutex_init(&nic_data->udp_tunnels_lock); 716 717 /* Reset (most) configuration for this function */ 718 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); 719 if (rc) 720 goto fail3; 721 722 /* Enable event logging */ 723 rc = efx_mcdi_log_ctrl(efx, true, false, 0); 724 if (rc) 725 goto fail3; 726 727 rc = device_create_file(&efx->pci_dev->dev, 728 &dev_attr_link_control_flag); 729 if (rc) 730 goto fail3; 731 732 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 733 if (rc) 734 goto fail4; 735 736 rc = efx_ef10_get_pf_index(efx); 737 if (rc) 738 goto fail5; 739 740 rc = efx_ef10_init_datapath_caps(efx); 741 if (rc < 0) 742 goto fail5; 743 744 efx_ef10_read_licensed_features(efx); 745 746 /* We can have one VI for each vi_stride-byte region. 747 * However, until we use TX option descriptors we need two TX queues 748 * per channel. 749 */ 750 efx->max_channels = min_t(unsigned int, 751 EFX_MAX_CHANNELS, 752 efx_ef10_mem_map_size(efx) / 753 (efx->vi_stride * EFX_TXQ_TYPES)); 754 efx->max_tx_channels = efx->max_channels; 755 if (WARN_ON(efx->max_channels == 0)) { 756 rc = -EIO; 757 goto fail5; 758 } 759 760 efx->rx_packet_len_offset = 761 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; 762 763 if (nic_data->datapath_caps & 764 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN)) 765 efx->net_dev->hw_features |= NETIF_F_RXFCS; 766 767 rc = efx_mcdi_port_get_number(efx); 768 if (rc < 0) 769 goto fail5; 770 efx->port_num = rc; 771 772 rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr); 773 if (rc) 774 goto fail5; 775 776 rc = efx_ef10_get_timer_config(efx); 777 if (rc < 0) 778 goto fail5; 779 780 rc = efx_mcdi_mon_probe(efx); 781 if (rc && rc != -EPERM) 782 goto fail5; 783 784 efx_ptp_defer_probe_with_channel(efx); 785 786 #ifdef CONFIG_SFC_SRIOV 787 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) { 788 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 789 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 790 791 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id); 792 } else 793 #endif 794 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); 795 796 INIT_LIST_HEAD(&nic_data->vlan_list); 797 mutex_init(&nic_data->vlan_lock); 798 799 /* Add unspecified VID to support VLAN filtering being disabled */ 800 rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); 801 if (rc) 802 goto fail_add_vid_unspec; 803 804 /* If VLAN filtering is enabled, we need VID 0 to get untagged 805 * traffic. It is added automatically if the 8021q module is loaded, 806 * but we can't rely on that since the module may not be loaded.
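 * (efx_ef10_del_vlan() above likewise refuses to remove VID 0.)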
807 */ 808 rc = efx_ef10_add_vlan(efx, 0); 809 if (rc) 810 goto fail_add_vid_0; 811 812 return 0; 813 814 fail_add_vid_0: 815 efx_ef10_cleanup_vlans(efx); 816 fail_add_vid_unspec: 817 mutex_destroy(&nic_data->vlan_lock); 818 efx_ptp_remove(efx); 819 efx_mcdi_mon_remove(efx); 820 fail5: 821 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 822 fail4: 823 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 824 fail3: 825 efx_mcdi_detach(efx); 826 827 mutex_lock(&nic_data->udp_tunnels_lock); 828 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 829 (void)efx_ef10_set_udp_tnl_ports(efx, true); 830 mutex_unlock(&nic_data->udp_tunnels_lock); 831 mutex_destroy(&nic_data->udp_tunnels_lock); 832 833 efx_mcdi_fini(efx); 834 fail2: 835 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 836 fail1: 837 kfree(nic_data); 838 efx->nic_data = NULL; 839 return rc; 840 } 841 842 static int efx_ef10_free_vis(struct efx_nic *efx) 843 { 844 MCDI_DECLARE_BUF_ERR(outbuf); 845 size_t outlen; 846 int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0, 847 outbuf, sizeof(outbuf), &outlen); 848 849 /* -EALREADY means nothing to free, so ignore */ 850 if (rc == -EALREADY) 851 rc = 0; 852 if (rc) 853 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen, 854 rc); 855 return rc; 856 } 857 858 #ifdef EFX_USE_PIO 859 860 static void efx_ef10_free_piobufs(struct efx_nic *efx) 861 { 862 struct efx_ef10_nic_data *nic_data = efx->nic_data; 863 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN); 864 unsigned int i; 865 int rc; 866 867 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0); 868 869 for (i = 0; i < nic_data->n_piobufs; i++) { 870 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE, 871 nic_data->piobuf_handle[i]); 872 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf), 873 NULL, 0, NULL); 874 WARN_ON(rc); 875 } 876 877 nic_data->n_piobufs = 0; 878 } 879 880 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 881 { 882 struct efx_ef10_nic_data *nic_data = efx->nic_data; 883 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN); 884 unsigned int i; 885 size_t outlen; 886 int rc = 0; 887 888 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0); 889 890 for (i = 0; i < n; i++) { 891 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0, 892 outbuf, sizeof(outbuf), &outlen); 893 if (rc) { 894 /* Don't display the MC error if we didn't have space 895 * for a VF. 
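 * (PIO buffers are a small resource shared by all functions, so a VF may get none.)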
896 */ 897 if (!(efx_ef10_is_vf(efx) && rc == -ENOSPC)) 898 efx_mcdi_display_error(efx, MC_CMD_ALLOC_PIOBUF, 899 0, outbuf, outlen, rc); 900 break; 901 } 902 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 903 rc = -EIO; 904 break; 905 } 906 nic_data->piobuf_handle[i] = 907 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 908 netif_dbg(efx, probe, efx->net_dev, 909 "allocated PIO buffer %u handle %x\n", i, 910 nic_data->piobuf_handle[i]); 911 } 912 913 nic_data->n_piobufs = i; 914 if (rc) 915 efx_ef10_free_piobufs(efx); 916 return rc; 917 } 918 919 static int efx_ef10_link_piobufs(struct efx_nic *efx) 920 { 921 struct efx_ef10_nic_data *nic_data = efx->nic_data; 922 MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_PIOBUF_IN_LEN); 923 struct efx_channel *channel; 924 struct efx_tx_queue *tx_queue; 925 unsigned int offset, index; 926 int rc; 927 928 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0); 929 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0); 930 931 /* Link a buffer to each VI in the write-combining mapping */ 932 for (index = 0; index < nic_data->n_piobufs; ++index) { 933 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE, 934 nic_data->piobuf_handle[index]); 935 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE, 936 nic_data->pio_write_vi_base + index); 937 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 938 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 939 NULL, 0, NULL); 940 if (rc) { 941 netif_err(efx, drv, efx->net_dev, 942 "failed to link VI %u to PIO buffer %u (%d)\n", 943 nic_data->pio_write_vi_base + index, index, 944 rc); 945 goto fail; 946 } 947 netif_dbg(efx, probe, efx->net_dev, 948 "linked VI %u to PIO buffer %u\n", 949 nic_data->pio_write_vi_base + index, index); 950 } 951 952 /* Link a buffer to each TX queue */ 953 efx_for_each_channel(channel, efx) { 954 /* Extra channels, even those with TXQs (PTP), do not require 955 * PIO resources. 956 */ 957 if (!channel->type->want_pio) 958 continue; 959 efx_for_each_channel_tx_queue(tx_queue, channel) { 960 /* We assign the PIO buffers to queues in 961 * reverse order to allow for the following 962 * special case. 963 */ 964 offset = ((efx->tx_channel_offset + efx->n_tx_channels - 965 tx_queue->channel->channel - 1) * 966 efx_piobuf_size); 967 index = offset / nic_data->piobuf_size; 968 offset = offset % nic_data->piobuf_size; 969 970 /* When the host page size is 4K, the first 971 * host page in the WC mapping may be within 972 * the same VI page as the last TX queue. We 973 * can only link one buffer to each VI. 
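 * In that case the queue reuses the buffer already linked to that VI above.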
974 */ 975 if (tx_queue->queue == nic_data->pio_write_vi_base) { 976 BUG_ON(index != 0); 977 rc = 0; 978 } else { 979 MCDI_SET_DWORD(inbuf, 980 LINK_PIOBUF_IN_PIOBUF_HANDLE, 981 nic_data->piobuf_handle[index]); 982 MCDI_SET_DWORD(inbuf, 983 LINK_PIOBUF_IN_TXQ_INSTANCE, 984 tx_queue->queue); 985 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF, 986 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN, 987 NULL, 0, NULL); 988 } 989 990 if (rc) { 991 /* This is non-fatal; the TX path just 992 * won't use PIO for this queue 993 */ 994 netif_err(efx, drv, efx->net_dev, 995 "failed to link VI %u to PIO buffer %u (%d)\n", 996 tx_queue->queue, index, rc); 997 tx_queue->piobuf = NULL; 998 } else { 999 tx_queue->piobuf = 1000 nic_data->pio_write_base + 1001 index * efx->vi_stride + offset; 1002 tx_queue->piobuf_offset = offset; 1003 netif_dbg(efx, probe, efx->net_dev, 1004 "linked VI %u to PIO buffer %u offset %x addr %p\n", 1005 tx_queue->queue, index, 1006 tx_queue->piobuf_offset, 1007 tx_queue->piobuf); 1008 } 1009 } 1010 } 1011 1012 return 0; 1013 1014 fail: 1015 /* inbuf was defined for MC_CMD_LINK_PIOBUF. We can use the same 1016 * buffer for MC_CMD_UNLINK_PIOBUF because it's shorter. 1017 */ 1018 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_IN_LEN < MC_CMD_UNLINK_PIOBUF_IN_LEN); 1019 while (index--) { 1020 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE, 1021 nic_data->pio_write_vi_base + index); 1022 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF, 1023 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN, 1024 NULL, 0, NULL); 1025 } 1026 return rc; 1027 } 1028 1029 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 1030 { 1031 struct efx_channel *channel; 1032 struct efx_tx_queue *tx_queue; 1033 1034 /* All our existing PIO buffers went away */ 1035 efx_for_each_channel(channel, efx) 1036 efx_for_each_channel_tx_queue(tx_queue, channel) 1037 tx_queue->piobuf = NULL; 1038 } 1039 1040 #else /* !EFX_USE_PIO */ 1041 1042 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) 1043 { 1044 return n == 0 ? 
0 : -ENOBUFS; 1045 } 1046 1047 static int efx_ef10_link_piobufs(struct efx_nic *efx) 1048 { 1049 return 0; 1050 } 1051 1052 static void efx_ef10_free_piobufs(struct efx_nic *efx) 1053 { 1054 } 1055 1056 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) 1057 { 1058 } 1059 1060 #endif /* EFX_USE_PIO */ 1061 1062 static void efx_ef10_remove(struct efx_nic *efx) 1063 { 1064 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1065 int rc; 1066 1067 #ifdef CONFIG_SFC_SRIOV 1068 struct efx_ef10_nic_data *nic_data_pf; 1069 struct pci_dev *pci_dev_pf; 1070 struct efx_nic *efx_pf; 1071 struct ef10_vf *vf; 1072 1073 if (efx->pci_dev->is_virtfn) { 1074 pci_dev_pf = efx->pci_dev->physfn; 1075 if (pci_dev_pf) { 1076 efx_pf = pci_get_drvdata(pci_dev_pf); 1077 nic_data_pf = efx_pf->nic_data; 1078 vf = nic_data_pf->vf + nic_data->vf_index; 1079 vf->efx = NULL; 1080 } else 1081 netif_info(efx, drv, efx->net_dev, 1082 "Could not get the PF id from VF\n"); 1083 } 1084 #endif 1085 1086 efx_ef10_cleanup_vlans(efx); 1087 mutex_destroy(&nic_data->vlan_lock); 1088 1089 efx_ptp_remove(efx); 1090 1091 efx_mcdi_mon_remove(efx); 1092 1093 efx_ef10_rx_free_indir_table(efx); 1094 1095 if (nic_data->wc_membase) 1096 iounmap(nic_data->wc_membase); 1097 1098 rc = efx_ef10_free_vis(efx); 1099 WARN_ON(rc != 0); 1100 1101 if (!nic_data->must_restore_piobufs) 1102 efx_ef10_free_piobufs(efx); 1103 1104 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); 1105 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag); 1106 1107 efx_mcdi_detach(efx); 1108 1109 memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels)); 1110 mutex_lock(&nic_data->udp_tunnels_lock); 1111 (void)efx_ef10_set_udp_tnl_ports(efx, true); 1112 mutex_unlock(&nic_data->udp_tunnels_lock); 1113 1114 mutex_destroy(&nic_data->udp_tunnels_lock); 1115 1116 efx_mcdi_fini(efx); 1117 efx_nic_free_buffer(efx, &nic_data->mcdi_buf); 1118 kfree(nic_data); 1119 } 1120 1121 static int efx_ef10_probe_pf(struct efx_nic *efx) 1122 { 1123 return efx_ef10_probe(efx); 1124 } 1125 1126 int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, 1127 u32 *port_flags, u32 *vadaptor_flags, 1128 unsigned int *vlan_tags) 1129 { 1130 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1131 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); 1132 MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); 1133 size_t outlen; 1134 int rc; 1135 1136 if (nic_data->datapath_caps & 1137 (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { 1138 MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, 1139 port_id); 1140 1141 rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), 1142 outbuf, sizeof(outbuf), &outlen); 1143 if (rc) 1144 return rc; 1145 1146 if (outlen < sizeof(outbuf)) { 1147 rc = -EIO; 1148 return rc; 1149 } 1150 } 1151 1152 if (port_flags) 1153 *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); 1154 if (vadaptor_flags) 1155 *vadaptor_flags = 1156 MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); 1157 if (vlan_tags) 1158 *vlan_tags = 1159 MCDI_DWORD(outbuf, 1160 VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); 1161 1162 return 0; 1163 } 1164 1165 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) 1166 { 1167 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); 1168 1169 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 1170 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf), 1171 NULL, 0, NULL); 1172 } 1173 1174 int 
efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id) 1175 { 1176 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN); 1177 1178 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 1179 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf), 1180 NULL, 0, NULL); 1181 } 1182 1183 int efx_ef10_vport_add_mac(struct efx_nic *efx, 1184 unsigned int port_id, u8 *mac) 1185 { 1186 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN); 1187 1188 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id); 1189 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac); 1190 1191 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf, 1192 sizeof(inbuf), NULL, 0, NULL); 1193 } 1194 1195 int efx_ef10_vport_del_mac(struct efx_nic *efx, 1196 unsigned int port_id, u8 *mac) 1197 { 1198 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN); 1199 1200 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id); 1201 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac); 1202 1203 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf, 1204 sizeof(inbuf), NULL, 0, NULL); 1205 } 1206 1207 #ifdef CONFIG_SFC_SRIOV 1208 static int efx_ef10_probe_vf(struct efx_nic *efx) 1209 { 1210 int rc; 1211 struct pci_dev *pci_dev_pf; 1212 1213 /* If the parent PF has no VF data structure, it doesn't know about this 1214 * VF so fail probe. The VF needs to be re-created. This can happen 1215 * if the PF driver is unloaded while the VF is assigned to a guest. 1216 */ 1217 pci_dev_pf = efx->pci_dev->physfn; 1218 if (pci_dev_pf) { 1219 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 1220 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data; 1221 1222 if (!nic_data_pf->vf) { 1223 netif_info(efx, drv, efx->net_dev, 1224 "The VF cannot link to its parent PF; " 1225 "please destroy and re-create the VF\n"); 1226 return -EBUSY; 1227 } 1228 } 1229 1230 rc = efx_ef10_probe(efx); 1231 if (rc) 1232 return rc; 1233 1234 rc = efx_ef10_get_vf_index(efx); 1235 if (rc) 1236 goto fail; 1237 1238 if (efx->pci_dev->is_virtfn) { 1239 if (efx->pci_dev->physfn) { 1240 struct efx_nic *efx_pf = 1241 pci_get_drvdata(efx->pci_dev->physfn); 1242 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data; 1243 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1244 1245 nic_data_p->vf[nic_data->vf_index].efx = efx; 1246 nic_data_p->vf[nic_data->vf_index].pci_dev = 1247 efx->pci_dev; 1248 } else 1249 netif_info(efx, drv, efx->net_dev, 1250 "Could not get the PF id from VF\n"); 1251 } 1252 1253 return 0; 1254 1255 fail: 1256 efx_ef10_remove(efx); 1257 return rc; 1258 } 1259 #else 1260 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused))) 1261 { 1262 return 0; 1263 } 1264 #endif 1265 1266 static int efx_ef10_alloc_vis(struct efx_nic *efx, 1267 unsigned int min_vis, unsigned int max_vis) 1268 { 1269 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN); 1270 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN); 1271 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1272 size_t outlen; 1273 int rc; 1274 1275 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis); 1276 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis); 1277 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf), 1278 outbuf, sizeof(outbuf), &outlen); 1279 if (rc != 0) 1280 return rc; 1281 1282 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN) 1283 return -EIO; 1284 1285 netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n", 1286 MCDI_DWORD(outbuf, 
ALLOC_VIS_OUT_VI_BASE)); 1287 1288 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE); 1289 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT); 1290 return 0; 1291 } 1292 1293 /* Note that the failure path of this function does not free 1294 * resources, as this will be done by efx_ef10_remove(). 1295 */ 1296 static int efx_ef10_dimension_resources(struct efx_nic *efx) 1297 { 1298 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1299 unsigned int uc_mem_map_size, wc_mem_map_size; 1300 unsigned int min_vis = max(EFX_TXQ_TYPES, 1301 efx_separate_tx_channels ? 2 : 1); 1302 unsigned int channel_vis, pio_write_vi_base, max_vis; 1303 void __iomem *membase; 1304 int rc; 1305 1306 channel_vis = max(efx->n_channels, 1307 (efx->n_tx_channels + efx->n_extra_tx_channels) * 1308 EFX_TXQ_TYPES); 1309 1310 #ifdef EFX_USE_PIO 1311 /* Try to allocate PIO buffers if wanted and if the full 1312 * number of PIO buffers would be sufficient to allocate one 1313 * copy-buffer per TX channel. Failure is non-fatal, as there 1314 * are only a small number of PIO buffers shared between all 1315 * functions of the controller. 1316 */ 1317 if (efx_piobuf_size != 0 && 1318 nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >= 1319 efx->n_tx_channels) { 1320 unsigned int n_piobufs = 1321 DIV_ROUND_UP(efx->n_tx_channels, 1322 nic_data->piobuf_size / efx_piobuf_size); 1323 1324 rc = efx_ef10_alloc_piobufs(efx, n_piobufs); 1325 if (rc == -ENOSPC) 1326 netif_dbg(efx, probe, efx->net_dev, 1327 "out of PIO buffers; cannot allocate more\n"); 1328 else if (rc == -EPERM) 1329 netif_dbg(efx, probe, efx->net_dev, 1330 "not permitted to allocate PIO buffers\n"); 1331 else if (rc) 1332 netif_err(efx, probe, efx->net_dev, 1333 "failed to allocate PIO buffers (%d)\n", rc); 1334 else 1335 netif_dbg(efx, probe, efx->net_dev, 1336 "allocated %u PIO buffers\n", n_piobufs); 1337 } 1338 #else 1339 nic_data->n_piobufs = 0; 1340 #endif 1341 1342 /* PIO buffers should be mapped with write-combining enabled, 1343 * and we want to make single UC and WC mappings rather than 1344 * several of each (in fact that's the only option if host 1345 * page size is >4K). So we may allocate some extra VIs just 1346 * for writing PIO buffers through. 1347 * 1348 * The UC mapping contains (channel_vis - 1) complete VIs and the 1349 * first 4K of the next VI. Then the WC mapping begins with 1350 * the remainder of this last VI. 1351 */ 1352 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * efx->vi_stride + 1353 ER_DZ_TX_PIOBUF); 1354 if (nic_data->n_piobufs) { 1355 /* pio_write_vi_base rounds down to give the number of complete 1356 * VIs inside the UC mapping. 1357 */ 1358 pio_write_vi_base = uc_mem_map_size / efx->vi_stride; 1359 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base + 1360 nic_data->n_piobufs) * 1361 efx->vi_stride) - 1362 uc_mem_map_size); 1363 max_vis = pio_write_vi_base + nic_data->n_piobufs; 1364 } else { 1365 pio_write_vi_base = 0; 1366 wc_mem_map_size = 0; 1367 max_vis = channel_vis; 1368 } 1369 1370 /* In case the last attached driver failed to free VIs, do it now */ 1371 rc = efx_ef10_free_vis(efx); 1372 if (rc != 0) 1373 return rc; 1374 1375 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis); 1376 if (rc != 0) 1377 return rc; 1378 1379 if (nic_data->n_allocated_vis < channel_vis) { 1380 netif_info(efx, drv, efx->net_dev, 1381 "Could not allocate enough VIs to satisfy RSS" 1382 " requirements. Performance may not be optimal.\n"); 1383 /* We didn't get the VIs to populate our channels. 
1384 * We could keep what we got but then we'd have more 1385 * interrupts than we need. 1386 * Instead calculate new max_channels and restart 1387 */ 1388 efx->max_channels = nic_data->n_allocated_vis; 1389 efx->max_tx_channels = 1390 nic_data->n_allocated_vis / EFX_TXQ_TYPES; 1391 1392 efx_ef10_free_vis(efx); 1393 return -EAGAIN; 1394 } 1395 1396 /* If we didn't get enough VIs to map all the PIO buffers, free the 1397 * PIO buffers 1398 */ 1399 if (nic_data->n_piobufs && 1400 nic_data->n_allocated_vis < 1401 pio_write_vi_base + nic_data->n_piobufs) { 1402 netif_dbg(efx, probe, efx->net_dev, 1403 "%u VIs are not sufficient to map %u PIO buffers\n", 1404 nic_data->n_allocated_vis, nic_data->n_piobufs); 1405 efx_ef10_free_piobufs(efx); 1406 } 1407 1408 /* Shrink the original UC mapping of the memory BAR */ 1409 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size); 1410 if (!membase) { 1411 netif_err(efx, probe, efx->net_dev, 1412 "could not shrink memory BAR to %x\n", 1413 uc_mem_map_size); 1414 return -ENOMEM; 1415 } 1416 iounmap(efx->membase); 1417 efx->membase = membase; 1418 1419 /* Set up the WC mapping if needed */ 1420 if (wc_mem_map_size) { 1421 nic_data->wc_membase = ioremap_wc(efx->membase_phys + 1422 uc_mem_map_size, 1423 wc_mem_map_size); 1424 if (!nic_data->wc_membase) { 1425 netif_err(efx, probe, efx->net_dev, 1426 "could not allocate WC mapping of size %x\n", 1427 wc_mem_map_size); 1428 return -ENOMEM; 1429 } 1430 nic_data->pio_write_vi_base = pio_write_vi_base; 1431 nic_data->pio_write_base = 1432 nic_data->wc_membase + 1433 (pio_write_vi_base * efx->vi_stride + ER_DZ_TX_PIOBUF - 1434 uc_mem_map_size); 1435 1436 rc = efx_ef10_link_piobufs(efx); 1437 if (rc) 1438 efx_ef10_free_piobufs(efx); 1439 } 1440 1441 netif_dbg(efx, probe, efx->net_dev, 1442 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n", 1443 &efx->membase_phys, efx->membase, uc_mem_map_size, 1444 nic_data->wc_membase, wc_mem_map_size); 1445 1446 return 0; 1447 } 1448 1449 static int efx_ef10_init_nic(struct efx_nic *efx) 1450 { 1451 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1452 int rc; 1453 1454 if (nic_data->must_check_datapath_caps) { 1455 rc = efx_ef10_init_datapath_caps(efx); 1456 if (rc) 1457 return rc; 1458 nic_data->must_check_datapath_caps = false; 1459 } 1460 1461 if (nic_data->must_realloc_vis) { 1462 /* We cannot let the number of VIs change now */ 1463 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis, 1464 nic_data->n_allocated_vis); 1465 if (rc) 1466 return rc; 1467 nic_data->must_realloc_vis = false; 1468 } 1469 1470 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { 1471 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); 1472 if (rc == 0) { 1473 rc = efx_ef10_link_piobufs(efx); 1474 if (rc) 1475 efx_ef10_free_piobufs(efx); 1476 } 1477 1478 /* Log an error on failure, but this is non-fatal. 1479 * Permission errors are less important - we've presumably 1480 * had the PIO buffer licence removed. 
1481 */ 1482 if (rc == -EPERM) 1483 netif_dbg(efx, drv, efx->net_dev, 1484 "not permitted to restore PIO buffers\n"); 1485 else if (rc) 1486 netif_err(efx, drv, efx->net_dev, 1487 "failed to restore PIO buffers (%d)\n", rc); 1488 nic_data->must_restore_piobufs = false; 1489 } 1490 1491 /* don't fail init if RSS setup doesn't work */ 1492 rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL); 1493 efx->rss_active = (rc == 0); 1494 1495 return 0; 1496 } 1497 1498 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) 1499 { 1500 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1501 #ifdef CONFIG_SFC_SRIOV 1502 unsigned int i; 1503 #endif 1504 1505 /* All our allocations have been reset */ 1506 nic_data->must_realloc_vis = true; 1507 nic_data->must_restore_filters = true; 1508 nic_data->must_restore_piobufs = true; 1509 efx_ef10_forget_old_piobufs(efx); 1510 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 1511 1512 /* Driver-created vswitches and vports must be re-created */ 1513 nic_data->must_probe_vswitching = true; 1514 nic_data->vport_id = EVB_PORT_ID_ASSIGNED; 1515 #ifdef CONFIG_SFC_SRIOV 1516 if (nic_data->vf) 1517 for (i = 0; i < efx->vf_count; i++) 1518 nic_data->vf[i].vport_id = 0; 1519 #endif 1520 } 1521 1522 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason) 1523 { 1524 if (reason == RESET_TYPE_MC_FAILURE) 1525 return RESET_TYPE_DATAPATH; 1526 1527 return efx_mcdi_map_reset_reason(reason); 1528 } 1529 1530 static int efx_ef10_map_reset_flags(u32 *flags) 1531 { 1532 enum { 1533 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) << 1534 ETH_RESET_SHARED_SHIFT), 1535 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER | 1536 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 1537 ETH_RESET_PHY | ETH_RESET_MGMT) << 1538 ETH_RESET_SHARED_SHIFT) 1539 }; 1540 1541 /* We assume for now that our PCI function is permitted to 1542 * reset everything. 1543 */ 1544 1545 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) { 1546 *flags &= ~EF10_RESET_MC; 1547 return RESET_TYPE_WORLD; 1548 } 1549 1550 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) { 1551 *flags &= ~EF10_RESET_PORT; 1552 return RESET_TYPE_ALL; 1553 } 1554 1555 /* no invisible reset implemented */ 1556 1557 return -EINVAL; 1558 } 1559 1560 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) 1561 { 1562 int rc = efx_mcdi_reset(efx, reset_type); 1563 1564 /* Unprivileged functions return -EPERM, but need to return success 1565 * here so that the datapath is brought back up. 1566 */ 1567 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM) 1568 rc = 0; 1569 1570 /* If it was a port reset, trigger reallocation of MC resources. 1571 * Note that on an MC reset nothing needs to be done now because we'll 1572 * detect the MC reset later and handle it then. 1573 * For an FLR, we never get an MC reset event, but the MC has reset all 1574 * resources assigned to us, so we have to trigger reallocation now. 
1575 */ 1576 if ((reset_type == RESET_TYPE_ALL || 1577 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc) 1578 efx_ef10_reset_mc_allocations(efx); 1579 return rc; 1580 } 1581 1582 #define EF10_DMA_STAT(ext_name, mcdi_name) \ 1583 [EF10_STAT_ ## ext_name] = \ 1584 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1585 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \ 1586 [EF10_STAT_ ## int_name] = \ 1587 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } 1588 #define EF10_OTHER_STAT(ext_name) \ 1589 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1590 #define GENERIC_SW_STAT(ext_name) \ 1591 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } 1592 1593 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { 1594 EF10_DMA_STAT(port_tx_bytes, TX_BYTES), 1595 EF10_DMA_STAT(port_tx_packets, TX_PKTS), 1596 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), 1597 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS), 1598 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), 1599 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), 1600 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), 1601 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), 1602 EF10_DMA_STAT(port_tx_64, TX_64_PKTS), 1603 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), 1604 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), 1605 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), 1606 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), 1607 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), 1608 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), 1609 EF10_DMA_STAT(port_rx_bytes, RX_BYTES), 1610 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES), 1611 EF10_OTHER_STAT(port_rx_good_bytes), 1612 EF10_OTHER_STAT(port_rx_bad_bytes), 1613 EF10_DMA_STAT(port_rx_packets, RX_PKTS), 1614 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS), 1615 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), 1616 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), 1617 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS), 1618 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), 1619 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), 1620 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), 1621 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), 1622 EF10_DMA_STAT(port_rx_64, RX_64_PKTS), 1623 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), 1624 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), 1625 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), 1626 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), 1627 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), 1628 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), 1629 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), 1630 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), 1631 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), 1632 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), 1633 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), 1634 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), 1635 GENERIC_SW_STAT(rx_nodesc_trunc), 1636 GENERIC_SW_STAT(rx_noskb_drops), 1637 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), 1638 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), 1639 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), 1640 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL), 1641 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB), 1642 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB), 1643 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING), 1644 
EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS), 1645 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS), 1646 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS), 1647 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS), 1648 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS), 1649 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS), 1650 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES), 1651 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS), 1652 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES), 1653 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS), 1654 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES), 1655 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS), 1656 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES), 1657 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW), 1658 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS), 1659 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES), 1660 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS), 1661 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES), 1662 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS), 1663 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES), 1664 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS), 1665 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES), 1666 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW), 1667 EF10_DMA_STAT(fec_uncorrected_errors, FEC_UNCORRECTED_ERRORS), 1668 EF10_DMA_STAT(fec_corrected_errors, FEC_CORRECTED_ERRORS), 1669 EF10_DMA_STAT(fec_corrected_symbols_lane0, FEC_CORRECTED_SYMBOLS_LANE0), 1670 EF10_DMA_STAT(fec_corrected_symbols_lane1, FEC_CORRECTED_SYMBOLS_LANE1), 1671 EF10_DMA_STAT(fec_corrected_symbols_lane2, FEC_CORRECTED_SYMBOLS_LANE2), 1672 EF10_DMA_STAT(fec_corrected_symbols_lane3, FEC_CORRECTED_SYMBOLS_LANE3), 1673 EF10_DMA_STAT(ctpio_dmabuf_start, CTPIO_DMABUF_START), 1674 EF10_DMA_STAT(ctpio_vi_busy_fallback, CTPIO_VI_BUSY_FALLBACK), 1675 EF10_DMA_STAT(ctpio_long_write_success, CTPIO_LONG_WRITE_SUCCESS), 1676 EF10_DMA_STAT(ctpio_missing_dbell_fail, CTPIO_MISSING_DBELL_FAIL), 1677 EF10_DMA_STAT(ctpio_overflow_fail, CTPIO_OVERFLOW_FAIL), 1678 EF10_DMA_STAT(ctpio_underflow_fail, CTPIO_UNDERFLOW_FAIL), 1679 EF10_DMA_STAT(ctpio_timeout_fail, CTPIO_TIMEOUT_FAIL), 1680 EF10_DMA_STAT(ctpio_noncontig_wr_fail, CTPIO_NONCONTIG_WR_FAIL), 1681 EF10_DMA_STAT(ctpio_frm_clobber_fail, CTPIO_FRM_CLOBBER_FAIL), 1682 EF10_DMA_STAT(ctpio_invalid_wr_fail, CTPIO_INVALID_WR_FAIL), 1683 EF10_DMA_STAT(ctpio_vi_clobber_fallback, CTPIO_VI_CLOBBER_FALLBACK), 1684 EF10_DMA_STAT(ctpio_unqualified_fallback, CTPIO_UNQUALIFIED_FALLBACK), 1685 EF10_DMA_STAT(ctpio_runt_fallback, CTPIO_RUNT_FALLBACK), 1686 EF10_DMA_STAT(ctpio_success, CTPIO_SUCCESS), 1687 EF10_DMA_STAT(ctpio_fallback, CTPIO_FALLBACK), 1688 EF10_DMA_STAT(ctpio_poison, CTPIO_POISON), 1689 EF10_DMA_STAT(ctpio_erase, CTPIO_ERASE), 1690 }; 1691 1692 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \ 1693 (1ULL << EF10_STAT_port_tx_packets) | \ 1694 (1ULL << EF10_STAT_port_tx_pause) | \ 1695 (1ULL << EF10_STAT_port_tx_unicast) | \ 1696 (1ULL << EF10_STAT_port_tx_multicast) | \ 1697 (1ULL << EF10_STAT_port_tx_broadcast) | \ 1698 (1ULL << EF10_STAT_port_rx_bytes) | \ 1699 (1ULL << \ 1700 EF10_STAT_port_rx_bytes_minus_good_bytes) | \ 1701 (1ULL << EF10_STAT_port_rx_good_bytes) | \ 1702 (1ULL << EF10_STAT_port_rx_bad_bytes) | \ 1703 (1ULL << EF10_STAT_port_rx_packets) | \ 1704 (1ULL 
<< EF10_STAT_port_rx_good) | \ 1705 (1ULL << EF10_STAT_port_rx_bad) | \ 1706 (1ULL << EF10_STAT_port_rx_pause) | \ 1707 (1ULL << EF10_STAT_port_rx_control) | \ 1708 (1ULL << EF10_STAT_port_rx_unicast) | \ 1709 (1ULL << EF10_STAT_port_rx_multicast) | \ 1710 (1ULL << EF10_STAT_port_rx_broadcast) | \ 1711 (1ULL << EF10_STAT_port_rx_lt64) | \ 1712 (1ULL << EF10_STAT_port_rx_64) | \ 1713 (1ULL << EF10_STAT_port_rx_65_to_127) | \ 1714 (1ULL << EF10_STAT_port_rx_128_to_255) | \ 1715 (1ULL << EF10_STAT_port_rx_256_to_511) | \ 1716 (1ULL << EF10_STAT_port_rx_512_to_1023) |\ 1717 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\ 1718 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\ 1719 (1ULL << EF10_STAT_port_rx_gtjumbo) | \ 1720 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\ 1721 (1ULL << EF10_STAT_port_rx_overflow) | \ 1722 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\ 1723 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \ 1724 (1ULL << GENERIC_STAT_rx_noskb_drops)) 1725 1726 /* On 7000 series NICs, these statistics are only provided by the 10G MAC. 1727 * For a 10G/40G switchable port we do not expose these because they might 1728 * not include all the packets they should. 1729 * On 8000 series NICs these statistics are always provided. 1730 */ 1731 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \ 1732 (1ULL << EF10_STAT_port_tx_lt64) | \ 1733 (1ULL << EF10_STAT_port_tx_64) | \ 1734 (1ULL << EF10_STAT_port_tx_65_to_127) |\ 1735 (1ULL << EF10_STAT_port_tx_128_to_255) |\ 1736 (1ULL << EF10_STAT_port_tx_256_to_511) |\ 1737 (1ULL << EF10_STAT_port_tx_512_to_1023) |\ 1738 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\ 1739 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo)) 1740 1741 /* These statistics are only provided by the 40G MAC. For a 10G/40G 1742 * switchable port we do expose these because the errors will otherwise 1743 * be silent. 1744 */ 1745 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\ 1746 (1ULL << EF10_STAT_port_rx_length_error)) 1747 1748 /* These statistics are only provided if the firmware supports the 1749 * capability PM_AND_RXDP_COUNTERS. 1750 */ 1751 #define HUNT_PM_AND_RXDP_STAT_MASK ( \ 1752 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \ 1753 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \ 1754 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \ 1755 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \ 1756 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \ 1757 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \ 1758 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \ 1759 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \ 1760 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \ 1761 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \ 1762 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \ 1763 (1ULL << EF10_STAT_port_rx_dp_hlb_wait)) 1764 1765 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V2, 1766 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V2 in 1767 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1768 * These bits are in the second u64 of the raw mask. 
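 * (Hence the "- 64" in the bit positions of EF10_FEC_STAT_MASK below.)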
1769 */ 1770 #define EF10_FEC_STAT_MASK ( \ 1771 (1ULL << (EF10_STAT_fec_uncorrected_errors - 64)) | \ 1772 (1ULL << (EF10_STAT_fec_corrected_errors - 64)) | \ 1773 (1ULL << (EF10_STAT_fec_corrected_symbols_lane0 - 64)) | \ 1774 (1ULL << (EF10_STAT_fec_corrected_symbols_lane1 - 64)) | \ 1775 (1ULL << (EF10_STAT_fec_corrected_symbols_lane2 - 64)) | \ 1776 (1ULL << (EF10_STAT_fec_corrected_symbols_lane3 - 64))) 1777 1778 /* These statistics are only provided if the NIC supports MC_CMD_MAC_STATS_V3, 1779 * indicated by returning a value >= MC_CMD_MAC_NSTATS_V3 in 1780 * MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS. 1781 * These bits are in the second u64 of the raw mask. 1782 */ 1783 #define EF10_CTPIO_STAT_MASK ( \ 1784 (1ULL << (EF10_STAT_ctpio_dmabuf_start - 64)) | \ 1785 (1ULL << (EF10_STAT_ctpio_vi_busy_fallback - 64)) | \ 1786 (1ULL << (EF10_STAT_ctpio_long_write_success - 64)) | \ 1787 (1ULL << (EF10_STAT_ctpio_missing_dbell_fail - 64)) | \ 1788 (1ULL << (EF10_STAT_ctpio_overflow_fail - 64)) | \ 1789 (1ULL << (EF10_STAT_ctpio_underflow_fail - 64)) | \ 1790 (1ULL << (EF10_STAT_ctpio_timeout_fail - 64)) | \ 1791 (1ULL << (EF10_STAT_ctpio_noncontig_wr_fail - 64)) | \ 1792 (1ULL << (EF10_STAT_ctpio_frm_clobber_fail - 64)) | \ 1793 (1ULL << (EF10_STAT_ctpio_invalid_wr_fail - 64)) | \ 1794 (1ULL << (EF10_STAT_ctpio_vi_clobber_fallback - 64)) | \ 1795 (1ULL << (EF10_STAT_ctpio_unqualified_fallback - 64)) | \ 1796 (1ULL << (EF10_STAT_ctpio_runt_fallback - 64)) | \ 1797 (1ULL << (EF10_STAT_ctpio_success - 64)) | \ 1798 (1ULL << (EF10_STAT_ctpio_fallback - 64)) | \ 1799 (1ULL << (EF10_STAT_ctpio_poison - 64)) | \ 1800 (1ULL << (EF10_STAT_ctpio_erase - 64))) 1801 1802 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx) 1803 { 1804 u64 raw_mask = HUNT_COMMON_STAT_MASK; 1805 u32 port_caps = efx_mcdi_phy_get_caps(efx); 1806 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1807 1808 if (!(efx->mcdi->fn_flags & 1809 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)) 1810 return 0; 1811 1812 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) { 1813 raw_mask |= HUNT_40G_EXTRA_STAT_MASK; 1814 /* 8000 series have everything even at 40G */ 1815 if (nic_data->datapath_caps2 & 1816 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN)) 1817 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1818 } else { 1819 raw_mask |= HUNT_10G_ONLY_STAT_MASK; 1820 } 1821 1822 if (nic_data->datapath_caps & 1823 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN)) 1824 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK; 1825 1826 return raw_mask; 1827 } 1828 1829 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) 1830 { 1831 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1832 u64 raw_mask[2]; 1833 1834 raw_mask[0] = efx_ef10_raw_stat_mask(efx); 1835 1836 /* Only show vadaptor stats when EVB capability is present */ 1837 if (nic_data->datapath_caps & 1838 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) { 1839 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1); 1840 raw_mask[1] = (1ULL << (EF10_STAT_V1_COUNT - 64)) - 1; 1841 } else { 1842 raw_mask[1] = 0; 1843 } 1844 /* Only show FEC stats when NIC supports MC_CMD_MAC_STATS_V2 */ 1845 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V2) 1846 raw_mask[1] |= EF10_FEC_STAT_MASK; 1847 1848 /* CTPIO stats appear in V3. Only show them on devices that actually 1849 * support CTPIO. Although this driver doesn't use CTPIO others might, 1850 * and we may be reporting the stats for the underlying port. 
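* The check below therefore requires both MAC_STATS_V3 support and the CTPIO capability bit.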
1851 */ 1852 if (efx->num_mac_stats >= MC_CMD_MAC_NSTATS_V3 && 1853 (nic_data->datapath_caps2 & 1854 (1 << MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN))) 1855 raw_mask[1] |= EF10_CTPIO_STAT_MASK; 1856 1857 #if BITS_PER_LONG == 64 1858 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); 1859 mask[0] = raw_mask[0]; 1860 mask[1] = raw_mask[1]; 1861 #else 1862 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); 1863 mask[0] = raw_mask[0] & 0xffffffff; 1864 mask[1] = raw_mask[0] >> 32; 1865 mask[2] = raw_mask[1] & 0xffffffff; 1866 #endif 1867 } 1868 1869 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names) 1870 { 1871 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1872 1873 efx_ef10_get_stat_mask(efx, mask); 1874 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, 1875 mask, names); 1876 } 1877 1878 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, 1879 struct rtnl_link_stats64 *core_stats) 1880 { 1881 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1882 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1883 u64 *stats = nic_data->stats; 1884 size_t stats_count = 0, index; 1885 1886 efx_ef10_get_stat_mask(efx, mask); 1887 1888 if (full_stats) { 1889 for_each_set_bit(index, mask, EF10_STAT_COUNT) { 1890 if (efx_ef10_stat_desc[index].name) { 1891 *full_stats++ = stats[index]; 1892 ++stats_count; 1893 } 1894 } 1895 } 1896 1897 if (!core_stats) 1898 return stats_count; 1899 1900 if (nic_data->datapath_caps & 1901 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) { 1902 /* Use vadaptor stats. */ 1903 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] + 1904 stats[EF10_STAT_rx_multicast] + 1905 stats[EF10_STAT_rx_broadcast]; 1906 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] + 1907 stats[EF10_STAT_tx_multicast] + 1908 stats[EF10_STAT_tx_broadcast]; 1909 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] + 1910 stats[EF10_STAT_rx_multicast_bytes] + 1911 stats[EF10_STAT_rx_broadcast_bytes]; 1912 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] + 1913 stats[EF10_STAT_tx_multicast_bytes] + 1914 stats[EF10_STAT_tx_broadcast_bytes]; 1915 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] + 1916 stats[GENERIC_STAT_rx_noskb_drops]; 1917 core_stats->multicast = stats[EF10_STAT_rx_multicast]; 1918 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad]; 1919 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow]; 1920 core_stats->rx_errors = core_stats->rx_crc_errors; 1921 core_stats->tx_errors = stats[EF10_STAT_tx_bad]; 1922 } else { 1923 /* Use port stats. 
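* These are the per-physical-port MAC counters; rx_errors is derived below from the length, CRC and frame error counts.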
*/ 1924 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets]; 1925 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets]; 1926 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes]; 1927 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes]; 1928 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] + 1929 stats[GENERIC_STAT_rx_nodesc_trunc] + 1930 stats[GENERIC_STAT_rx_noskb_drops]; 1931 core_stats->multicast = stats[EF10_STAT_port_rx_multicast]; 1932 core_stats->rx_length_errors = 1933 stats[EF10_STAT_port_rx_gtjumbo] + 1934 stats[EF10_STAT_port_rx_length_error]; 1935 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad]; 1936 core_stats->rx_frame_errors = 1937 stats[EF10_STAT_port_rx_align_error]; 1938 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow]; 1939 core_stats->rx_errors = (core_stats->rx_length_errors + 1940 core_stats->rx_crc_errors + 1941 core_stats->rx_frame_errors); 1942 } 1943 1944 return stats_count; 1945 } 1946 1947 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) 1948 { 1949 struct efx_ef10_nic_data *nic_data = efx->nic_data; 1950 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 1951 __le64 generation_start, generation_end; 1952 u64 *stats = nic_data->stats; 1953 __le64 *dma_stats; 1954 1955 efx_ef10_get_stat_mask(efx, mask); 1956 1957 dma_stats = efx->stats_buffer.addr; 1958 1959 generation_end = dma_stats[efx->num_mac_stats - 1]; 1960 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) 1961 return 0; 1962 rmb(); 1963 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 1964 stats, efx->stats_buffer.addr, false); 1965 rmb(); 1966 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 1967 if (generation_end != generation_start) 1968 return -EAGAIN; 1969 1970 /* Update derived statistics */ 1971 efx_nic_fix_nodesc_drop_stat(efx, 1972 &stats[EF10_STAT_port_rx_nodesc_drops]); 1973 stats[EF10_STAT_port_rx_good_bytes] = 1974 stats[EF10_STAT_port_rx_bytes] - 1975 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; 1976 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], 1977 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); 1978 efx_update_sw_stats(efx, stats); 1979 return 0; 1980 } 1981 1982 1983 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, 1984 struct rtnl_link_stats64 *core_stats) 1985 { 1986 int retry; 1987 1988 /* If we're unlucky enough to read statistics during the DMA, wait 1989 * up to 10ms for it to finish (typically takes <500us) 1990 */ 1991 for (retry = 0; retry < 100; ++retry) { 1992 if (efx_ef10_try_update_nic_stats_pf(efx) == 0) 1993 break; 1994 udelay(100); 1995 } 1996 1997 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 1998 } 1999 2000 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) 2001 { 2002 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); 2003 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2004 DECLARE_BITMAP(mask, EF10_STAT_COUNT); 2005 __le64 generation_start, generation_end; 2006 u64 *stats = nic_data->stats; 2007 u32 dma_len = efx->num_mac_stats * sizeof(u64); 2008 struct efx_buffer stats_buf; 2009 __le64 *dma_stats; 2010 int rc; 2011 2012 spin_unlock_bh(&efx->stats_lock); 2013 2014 if (in_interrupt()) { 2015 /* If in atomic context, cannot update stats. Just update the 2016 * software stats and return so the caller can continue. 
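* (The MC_CMD_MAC_STATS request issued below may sleep, so it cannot be made from atomic context.)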
2017 */ 2018 spin_lock_bh(&efx->stats_lock); 2019 efx_update_sw_stats(efx, stats); 2020 return 0; 2021 } 2022 2023 efx_ef10_get_stat_mask(efx, mask); 2024 2025 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC); 2026 if (rc) { 2027 spin_lock_bh(&efx->stats_lock); 2028 return rc; 2029 } 2030 2031 dma_stats = stats_buf.addr; 2032 dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; 2033 2034 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr); 2035 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD, 2036 MAC_STATS_IN_DMA, 1); 2037 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 2038 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED); 2039 2040 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 2041 NULL, 0, NULL); 2042 spin_lock_bh(&efx->stats_lock); 2043 if (rc) { 2044 /* Expect ENOENT if DMA queues have not been set up */ 2045 if (rc != -ENOENT || atomic_read(&efx->active_queues)) 2046 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, 2047 sizeof(inbuf), NULL, 0, rc); 2048 goto out; 2049 } 2050 2051 generation_end = dma_stats[efx->num_mac_stats - 1]; 2052 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) { 2053 WARN_ON_ONCE(1); 2054 goto out; 2055 } 2056 rmb(); 2057 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, 2058 stats, stats_buf.addr, false); 2059 rmb(); 2060 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; 2061 if (generation_end != generation_start) { 2062 rc = -EAGAIN; 2063 goto out; 2064 } 2065 2066 efx_update_sw_stats(efx, stats); 2067 out: 2068 efx_nic_free_buffer(efx, &stats_buf); 2069 return rc; 2070 } 2071 2072 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats, 2073 struct rtnl_link_stats64 *core_stats) 2074 { 2075 if (efx_ef10_try_update_nic_stats_vf(efx)) 2076 return 0; 2077 2078 return efx_ef10_update_stats_common(efx, full_stats, core_stats); 2079 } 2080 2081 static void efx_ef10_push_irq_moderation(struct efx_channel *channel) 2082 { 2083 struct efx_nic *efx = channel->efx; 2084 unsigned int mode, usecs; 2085 efx_dword_t timer_cmd; 2086 2087 if (channel->irq_moderation_us) { 2088 mode = 3; 2089 usecs = channel->irq_moderation_us; 2090 } else { 2091 mode = 0; 2092 usecs = 0; 2093 } 2094 2095 if (EFX_EF10_WORKAROUND_61265(efx)) { 2096 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN); 2097 unsigned int ns = usecs * 1000; 2098 2099 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, 2100 channel->channel); 2101 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns); 2102 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns); 2103 MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE, mode); 2104 2105 efx_mcdi_rpc_async(efx, MC_CMD_SET_EVQ_TMR, 2106 inbuf, sizeof(inbuf), 0, NULL, 0); 2107 } else if (EFX_EF10_WORKAROUND_35388(efx)) { 2108 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2109 2110 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS, 2111 EFE_DD_EVQ_IND_TIMER_FLAGS, 2112 ERF_DD_EVQ_IND_TIMER_MODE, mode, 2113 ERF_DD_EVQ_IND_TIMER_VAL, ticks); 2114 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT, 2115 channel->channel); 2116 } else { 2117 unsigned int ticks = efx_usecs_to_ticks(efx, usecs); 2118 2119 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode, 2120 ERF_DZ_TC_TIMER_VAL, ticks, 2121 ERF_FZ_TC_TMR_REL_VAL, ticks); 2122 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR, 2123 channel->channel); 2124 } 2125 } 2126 2127 static void efx_ef10_get_wol_vf(struct efx_nic *efx, 2128 struct ethtool_wolinfo *wol) {} 2129 2130 
static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type) 2131 { 2132 return -EOPNOTSUPP; 2133 } 2134 2135 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) 2136 { 2137 wol->supported = 0; 2138 wol->wolopts = 0; 2139 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2140 } 2141 2142 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type) 2143 { 2144 if (type != 0) 2145 return -EINVAL; 2146 return 0; 2147 } 2148 2149 static void efx_ef10_mcdi_request(struct efx_nic *efx, 2150 const efx_dword_t *hdr, size_t hdr_len, 2151 const efx_dword_t *sdu, size_t sdu_len) 2152 { 2153 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2154 u8 *pdu = nic_data->mcdi_buf.addr; 2155 2156 memcpy(pdu, hdr, hdr_len); 2157 memcpy(pdu + hdr_len, sdu, sdu_len); 2158 wmb(); 2159 2160 /* The hardware provides 'low' and 'high' (doorbell) registers 2161 * for passing the 64-bit address of an MCDI request to 2162 * firmware. However the dwords are swapped by firmware. The 2163 * least significant bits of the doorbell are then 0 for all 2164 * MCDI requests due to alignment. 2165 */ 2166 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32), 2167 ER_DZ_MC_DB_LWRD); 2168 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr), 2169 ER_DZ_MC_DB_HWRD); 2170 } 2171 2172 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx) 2173 { 2174 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2175 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr; 2176 2177 rmb(); 2178 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); 2179 } 2180 2181 static void 2182 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf, 2183 size_t offset, size_t outlen) 2184 { 2185 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2186 const u8 *pdu = nic_data->mcdi_buf.addr; 2187 2188 memcpy(outbuf, pdu + offset, outlen); 2189 } 2190 2191 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx) 2192 { 2193 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2194 2195 /* All our allocations have been reset */ 2196 efx_ef10_reset_mc_allocations(efx); 2197 2198 /* The datapath firmware might have been changed */ 2199 nic_data->must_check_datapath_caps = true; 2200 2201 /* MAC statistics have been cleared on the NIC; clear the local 2202 * statistic that we update with efx_update_diff_stat(). 2203 */ 2204 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0; 2205 } 2206 2207 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx) 2208 { 2209 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2210 int rc; 2211 2212 rc = efx_ef10_get_warm_boot_count(efx); 2213 if (rc < 0) { 2214 /* The firmware is presumably in the process of 2215 * rebooting. However, we are supposed to report each 2216 * reboot just once, so we must only do that once we 2217 * can read and store the updated warm boot count. 2218 */ 2219 return 0; 2220 } 2221 2222 if (rc == nic_data->warm_boot_count) 2223 return 0; 2224 2225 nic_data->warm_boot_count = rc; 2226 efx_ef10_mcdi_reboot_detected(efx); 2227 2228 return -EIO; 2229 } 2230 2231 /* Handle an MSI interrupt 2232 * 2233 * Handle an MSI hardware interrupt. This routine schedules event 2234 * queue processing. No interrupt acknowledgement cycle is necessary. 2235 * Also, we never need to check that the interrupt is for us, since 2236 * MSI interrupts cannot be shared. 
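* (Contrast with efx_ef10_legacy_interrupt() below, which must read the ISR register to identify and acknowledge the interrupt source.)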
2237 */ 2238 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) 2239 { 2240 struct efx_msi_context *context = dev_id; 2241 struct efx_nic *efx = context->efx; 2242 2243 netif_vdbg(efx, intr, efx->net_dev, 2244 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); 2245 2246 if (likely(READ_ONCE(efx->irq_soft_enabled))) { 2247 /* Note test interrupts */ 2248 if (context->index == efx->irq_level) 2249 efx->last_irq_cpu = raw_smp_processor_id(); 2250 2251 /* Schedule processing of the channel */ 2252 efx_schedule_channel_irq(efx->channel[context->index]); 2253 } 2254 2255 return IRQ_HANDLED; 2256 } 2257 2258 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) 2259 { 2260 struct efx_nic *efx = dev_id; 2261 bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); 2262 struct efx_channel *channel; 2263 efx_dword_t reg; 2264 u32 queues; 2265 2266 /* Read the ISR which also ACKs the interrupts */ 2267 efx_readd(efx, ®, ER_DZ_BIU_INT_ISR); 2268 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); 2269 2270 if (queues == 0) 2271 return IRQ_NONE; 2272 2273 if (likely(soft_enabled)) { 2274 /* Note test interrupts */ 2275 if (queues & (1U << efx->irq_level)) 2276 efx->last_irq_cpu = raw_smp_processor_id(); 2277 2278 efx_for_each_channel(channel, efx) { 2279 if (queues & 1) 2280 efx_schedule_channel_irq(channel); 2281 queues >>= 1; 2282 } 2283 } 2284 2285 netif_vdbg(efx, intr, efx->net_dev, 2286 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 2287 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 2288 2289 return IRQ_HANDLED; 2290 } 2291 2292 static int efx_ef10_irq_test_generate(struct efx_nic *efx) 2293 { 2294 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); 2295 2296 if (efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG41750, true, 2297 NULL) == 0) 2298 return -ENOTSUPP; 2299 2300 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); 2301 2302 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); 2303 return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT, 2304 inbuf, sizeof(inbuf), NULL, 0, NULL); 2305 } 2306 2307 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue) 2308 { 2309 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, 2310 (tx_queue->ptr_mask + 1) * 2311 sizeof(efx_qword_t), 2312 GFP_KERNEL); 2313 } 2314 2315 /* This writes to the TX_DESC_WPTR and also pushes data */ 2316 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue, 2317 const efx_qword_t *txd) 2318 { 2319 unsigned int write_ptr; 2320 efx_oword_t reg; 2321 2322 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2323 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr); 2324 reg.qword[0] = *txd; 2325 efx_writeo_page(tx_queue->efx, ®, 2326 ER_DZ_TX_DESC_UPD, tx_queue->queue); 2327 } 2328 2329 /* Add Firmware-Assisted TSO v2 option descriptors to a queue. 2330 */ 2331 static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, 2332 struct sk_buff *skb, 2333 bool *data_mapped) 2334 { 2335 struct efx_tx_buffer *buffer; 2336 struct tcphdr *tcp; 2337 struct iphdr *ip; 2338 2339 u16 ipv4_id; 2340 u32 seqnum; 2341 u32 mss; 2342 2343 EFX_WARN_ON_ONCE_PARANOID(tx_queue->tso_version != 2); 2344 2345 mss = skb_shinfo(skb)->gso_size; 2346 2347 if (unlikely(mss < 4)) { 2348 WARN_ONCE(1, "MSS of %u is too small for TSO v2\n", mss); 2349 return -EINVAL; 2350 } 2351 2352 ip = ip_hdr(skb); 2353 if (ip->version == 4) { 2354 /* Modify IPv4 header if needed. 
*/ 2355 ip->tot_len = 0; 2356 ip->check = 0; 2357 ipv4_id = ntohs(ip->id); 2358 } else { 2359 /* Modify IPv6 header if needed. */ 2360 struct ipv6hdr *ipv6 = ipv6_hdr(skb); 2361 2362 ipv6->payload_len = 0; 2363 ipv4_id = 0; 2364 } 2365 2366 tcp = tcp_hdr(skb); 2367 seqnum = ntohl(tcp->seq); 2368 2369 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2370 2371 buffer->flags = EFX_TX_BUF_OPTION; 2372 buffer->len = 0; 2373 buffer->unmap_len = 0; 2374 EFX_POPULATE_QWORD_5(buffer->option, 2375 ESF_DZ_TX_DESC_IS_OPT, 1, 2376 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2377 ESF_DZ_TX_TSO_OPTION_TYPE, 2378 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A, 2379 ESF_DZ_TX_TSO_IP_ID, ipv4_id, 2380 ESF_DZ_TX_TSO_TCP_SEQNO, seqnum 2381 ); 2382 ++tx_queue->insert_count; 2383 2384 buffer = efx_tx_queue_get_insert_buffer(tx_queue); 2385 2386 buffer->flags = EFX_TX_BUF_OPTION; 2387 buffer->len = 0; 2388 buffer->unmap_len = 0; 2389 EFX_POPULATE_QWORD_4(buffer->option, 2390 ESF_DZ_TX_DESC_IS_OPT, 1, 2391 ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO, 2392 ESF_DZ_TX_TSO_OPTION_TYPE, 2393 ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B, 2394 ESF_DZ_TX_TSO_TCP_MSS, mss 2395 ); 2396 ++tx_queue->insert_count; 2397 2398 return 0; 2399 } 2400 2401 static u32 efx_ef10_tso_versions(struct efx_nic *efx) 2402 { 2403 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2404 u32 tso_versions = 0; 2405 2406 if (nic_data->datapath_caps & 2407 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) 2408 tso_versions |= BIT(1); 2409 if (nic_data->datapath_caps2 & 2410 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) 2411 tso_versions |= BIT(2); 2412 return tso_versions; 2413 } 2414 2415 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) 2416 { 2417 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 2418 EFX_BUF_SIZE)); 2419 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 2420 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; 2421 struct efx_channel *channel = tx_queue->channel; 2422 struct efx_nic *efx = tx_queue->efx; 2423 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2424 bool tso_v2 = false; 2425 size_t inlen; 2426 dma_addr_t dma_addr; 2427 efx_qword_t *txd; 2428 int rc; 2429 int i; 2430 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0); 2431 2432 /* Only attempt to enable TX timestamping if we have the license for it, 2433 * otherwise TXQ init will fail 2434 */ 2435 if (!(nic_data->licensed_features & 2436 (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN))) { 2437 tx_queue->timestamping = false; 2438 /* Disable sync events on this channel. */ 2439 if (efx->type->ptp_set_ts_sync_events) 2440 efx->type->ptp_set_ts_sync_events(efx, false, false); 2441 } 2442 2443 /* TSOv2 is a limited resource that can only be configured on a limited 2444 * number of queues. TSO without checksum offload is not really a thing, 2445 * so we only enable it for those queues. 2446 * TSOv2 cannot be used with Hardware timestamping. 
2447 */ 2448 if (csum_offload && (nic_data->datapath_caps2 & 2449 (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) && 2450 !tx_queue->timestamping) { 2451 tso_v2 = true; 2452 netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n", 2453 channel->channel); 2454 } 2455 2456 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); 2457 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); 2458 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); 2459 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); 2460 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); 2461 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id); 2462 2463 dma_addr = tx_queue->txd.buf.dma_addr; 2464 2465 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n", 2466 tx_queue->queue, entries, (u64)dma_addr); 2467 2468 for (i = 0; i < entries; ++i) { 2469 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr); 2470 dma_addr += EFX_BUF_SIZE; 2471 } 2472 2473 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries); 2474 2475 do { 2476 MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS, 2477 /* This flag was removed from mcdi_pcol.h for 2478 * the non-_EXT version of INIT_TXQ. However, 2479 * firmware still honours it. 2480 */ 2481 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2, 2482 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload, 2483 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload, 2484 INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, 2485 tx_queue->timestamping); 2486 2487 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen, 2488 NULL, 0, NULL); 2489 if (rc == -ENOSPC && tso_v2) { 2490 /* Retry without TSOv2 if we're short on contexts. */ 2491 tso_v2 = false; 2492 netif_warn(efx, probe, efx->net_dev, 2493 "TSOv2 context not available to segment in hardware. TCP performance may be reduced.\n"); 2494 } else if (rc) { 2495 efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ, 2496 MC_CMD_INIT_TXQ_EXT_IN_LEN, 2497 NULL, 0, rc); 2498 goto fail; 2499 } 2500 } while (rc); 2501 2502 /* A previous user of this TX queue might have set us up the 2503 * bomb by writing a descriptor to the TX push collector but 2504 * not the doorbell. (Each collector belongs to a port, not a 2505 * queue or function, so cannot easily be reset.) We must 2506 * attempt to push a no-op descriptor in its place. 
2507 */ 2508 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION; 2509 tx_queue->insert_count = 1; 2510 txd = efx_tx_desc(tx_queue, 0); 2511 EFX_POPULATE_QWORD_5(*txd, 2512 ESF_DZ_TX_DESC_IS_OPT, true, 2513 ESF_DZ_TX_OPTION_TYPE, 2514 ESE_DZ_TX_OPTION_DESC_CRC_CSUM, 2515 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload, 2516 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload, 2517 ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping); 2518 tx_queue->write_count = 1; 2519 2520 if (tso_v2) { 2521 tx_queue->handle_tso = efx_ef10_tx_tso_desc; 2522 tx_queue->tso_version = 2; 2523 } else if (nic_data->datapath_caps & 2524 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) { 2525 tx_queue->tso_version = 1; 2526 } 2527 2528 wmb(); 2529 efx_ef10_push_tx_desc(tx_queue, txd); 2530 2531 return; 2532 2533 fail: 2534 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n", 2535 tx_queue->queue); 2536 } 2537 2538 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue) 2539 { 2540 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN); 2541 MCDI_DECLARE_BUF_ERR(outbuf); 2542 struct efx_nic *efx = tx_queue->efx; 2543 size_t outlen; 2544 int rc; 2545 2546 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE, 2547 tx_queue->queue); 2548 2549 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf), 2550 outbuf, sizeof(outbuf), &outlen); 2551 2552 if (rc && rc != -EALREADY) 2553 goto fail; 2554 2555 return; 2556 2557 fail: 2558 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN, 2559 outbuf, outlen, rc); 2560 } 2561 2562 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue) 2563 { 2564 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf); 2565 } 2566 2567 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 2568 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue) 2569 { 2570 unsigned int write_ptr; 2571 efx_dword_t reg; 2572 2573 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2574 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr); 2575 efx_writed_page(tx_queue->efx, ®, 2576 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue); 2577 } 2578 2579 #define EFX_EF10_MAX_TX_DESCRIPTOR_LEN 0x3fff 2580 2581 static unsigned int efx_ef10_tx_limit_len(struct efx_tx_queue *tx_queue, 2582 dma_addr_t dma_addr, unsigned int len) 2583 { 2584 if (len > EFX_EF10_MAX_TX_DESCRIPTOR_LEN) { 2585 /* If we need to break across multiple descriptors we should 2586 * stop at a page boundary. This assumes the length limit is 2587 * greater than the page size. 
2588 */ 2589 dma_addr_t end = dma_addr + EFX_EF10_MAX_TX_DESCRIPTOR_LEN; 2590 2591 BUILD_BUG_ON(EFX_EF10_MAX_TX_DESCRIPTOR_LEN < EFX_PAGE_SIZE); 2592 len = (end & (~(EFX_PAGE_SIZE - 1))) - dma_addr; 2593 } 2594 2595 return len; 2596 } 2597 2598 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue) 2599 { 2600 unsigned int old_write_count = tx_queue->write_count; 2601 struct efx_tx_buffer *buffer; 2602 unsigned int write_ptr; 2603 efx_qword_t *txd; 2604 2605 tx_queue->xmit_more_available = false; 2606 if (unlikely(tx_queue->write_count == tx_queue->insert_count)) 2607 return; 2608 2609 do { 2610 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 2611 buffer = &tx_queue->buffer[write_ptr]; 2612 txd = efx_tx_desc(tx_queue, write_ptr); 2613 ++tx_queue->write_count; 2614 2615 /* Create TX descriptor ring entry */ 2616 if (buffer->flags & EFX_TX_BUF_OPTION) { 2617 *txd = buffer->option; 2618 if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1) 2619 /* PIO descriptor */ 2620 tx_queue->packet_write_count = tx_queue->write_count; 2621 } else { 2622 tx_queue->packet_write_count = tx_queue->write_count; 2623 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 2624 EFX_POPULATE_QWORD_3( 2625 *txd, 2626 ESF_DZ_TX_KER_CONT, 2627 buffer->flags & EFX_TX_BUF_CONT, 2628 ESF_DZ_TX_KER_BYTE_CNT, buffer->len, 2629 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr); 2630 } 2631 } while (tx_queue->write_count != tx_queue->insert_count); 2632 2633 wmb(); /* Ensure descriptors are written before they are fetched */ 2634 2635 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) { 2636 txd = efx_tx_desc(tx_queue, 2637 old_write_count & tx_queue->ptr_mask); 2638 efx_ef10_push_tx_desc(tx_queue, txd); 2639 ++tx_queue->pushes; 2640 } else { 2641 efx_ef10_notify_tx_desc(tx_queue); 2642 } 2643 } 2644 2645 #define RSS_MODE_HASH_ADDRS (1 << RSS_MODE_HASH_SRC_ADDR_LBN |\ 2646 1 << RSS_MODE_HASH_DST_ADDR_LBN) 2647 #define RSS_MODE_HASH_PORTS (1 << RSS_MODE_HASH_SRC_PORT_LBN |\ 2648 1 << RSS_MODE_HASH_DST_PORT_LBN) 2649 #define RSS_CONTEXT_FLAGS_DEFAULT (1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN |\ 2650 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN |\ 2651 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN |\ 2652 1 << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN |\ 2653 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN |\ 2654 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN |\ 2655 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN |\ 2656 (RSS_MODE_HASH_ADDRS | RSS_MODE_HASH_PORTS) << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN |\ 2657 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ 2658 RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) 2659 2660 static int efx_ef10_get_rss_flags(struct efx_nic *efx, u32 context, u32 *flags) 2661 { 2662 /* Firmware had a bug (sfc bug 61952) where it would not actually 2663 * fill in the flags field in the response to MC_CMD_RSS_CONTEXT_GET_FLAGS. 2664 * This meant that it would always contain whatever was previously 2665 * in the MCDI buffer. Fortunately, all firmware versions with 2666 * this bug have the same default flags value for a newly-allocated 2667 * RSS context, and the only time we want to get the flags is just 2668 * after allocating. 
Moreover, the response has a 32-bit hole 2669 * where the context ID would be in the request, so we can use an 2670 * overlength buffer in the request and pre-fill the flags field 2671 * with what we believe the default to be. Thus if the firmware 2672 * has the bug, it will leave our pre-filled value in the flags 2673 * field of the response, and we will get the right answer. 2674 * 2675 * However, this does mean that this function should NOT be used if 2676 * the RSS context flags might not be their defaults - it is ONLY 2677 * reliably correct for a newly-allocated RSS context. 2678 */ 2679 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2680 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN); 2681 size_t outlen; 2682 int rc; 2683 2684 /* Check we have a hole for the context ID */ 2685 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN != MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST); 2686 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID, context); 2687 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS, 2688 RSS_CONTEXT_FLAGS_DEFAULT); 2689 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_FLAGS, inbuf, 2690 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen); 2691 if (rc == 0) { 2692 if (outlen < MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN) 2693 rc = -EIO; 2694 else 2695 *flags = MCDI_DWORD(outbuf, RSS_CONTEXT_GET_FLAGS_OUT_FLAGS); 2696 } 2697 return rc; 2698 } 2699 2700 /* Attempt to enable 4-tuple UDP hashing on the specified RSS context. 2701 * If we fail, we just leave the RSS context at its default hash settings, 2702 * which is safe but may slightly reduce performance. 2703 * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we 2704 * just need to set the UDP ports flags (for both IP versions). 2705 */ 2706 static void efx_ef10_set_rss_flags(struct efx_nic *efx, u32 context) 2707 { 2708 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); 2709 u32 flags; 2710 2711 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN != 0); 2712 2713 if (efx_ef10_get_rss_flags(efx, context, &flags) != 0) 2714 return; 2715 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, context); 2716 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN; 2717 flags |= RSS_MODE_HASH_PORTS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN; 2718 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS, flags); 2719 if (!efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf), 2720 NULL, 0, NULL)) 2721 /* Succeeded, so UDP 4-tuple is now enabled */ 2722 efx->rx_hash_udp_4tuple = true; 2723 } 2724 2725 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, 2726 bool exclusive, unsigned *context_size) 2727 { 2728 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); 2729 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN); 2730 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2731 size_t outlen; 2732 int rc; 2733 u32 alloc_type = exclusive ? 2734 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE : 2735 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED; 2736 unsigned rss_spread = exclusive ? 
2737 efx->rss_spread : 2738 min(rounddown_pow_of_two(efx->rss_spread), 2739 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE); 2740 2741 if (!exclusive && rss_spread == 1) { 2742 *context = EFX_EF10_RSS_CONTEXT_INVALID; 2743 if (context_size) 2744 *context_size = 1; 2745 return 0; 2746 } 2747 2748 if (nic_data->datapath_caps & 2749 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) 2750 return -EOPNOTSUPP; 2751 2752 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, 2753 nic_data->vport_id); 2754 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); 2755 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread); 2756 2757 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf), 2758 outbuf, sizeof(outbuf), &outlen); 2759 if (rc != 0) 2760 return rc; 2761 2762 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) 2763 return -EIO; 2764 2765 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID); 2766 2767 if (context_size) 2768 *context_size = rss_spread; 2769 2770 if (nic_data->datapath_caps & 2771 1 << MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN) 2772 efx_ef10_set_rss_flags(efx, *context); 2773 2774 return 0; 2775 } 2776 2777 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context) 2778 { 2779 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN); 2780 int rc; 2781 2782 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, 2783 context); 2784 2785 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf), 2786 NULL, 0, NULL); 2787 WARN_ON(rc != 0); 2788 } 2789 2790 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context, 2791 const u32 *rx_indir_table, const u8 *key) 2792 { 2793 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN); 2794 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN); 2795 int i, rc; 2796 2797 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, 2798 context); 2799 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 2800 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN); 2801 2802 /* This iterates over the length of efx->rx_indir_table, but copies 2803 * bytes from rx_indir_table. That's because the latter is a pointer 2804 * rather than an array, but should have the same length. 2805 * The efx->rx_hash_key loop below is similar. 
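* (Each indirection table entry is narrowed to a u8 queue index in the MCDI request.)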
2806 */ 2807 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i) 2808 MCDI_PTR(tablebuf, 2809 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] = 2810 (u8) rx_indir_table[i]; 2811 2812 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf, 2813 sizeof(tablebuf), NULL, 0, NULL); 2814 if (rc != 0) 2815 return rc; 2816 2817 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID, 2818 context); 2819 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != 2820 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2821 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) 2822 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i]; 2823 2824 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf, 2825 sizeof(keybuf), NULL, 0, NULL); 2826 } 2827 2828 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx) 2829 { 2830 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2831 2832 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 2833 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context); 2834 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; 2835 } 2836 2837 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx, 2838 unsigned *context_size) 2839 { 2840 u32 new_rx_rss_context; 2841 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2842 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 2843 false, context_size); 2844 2845 if (rc != 0) 2846 return rc; 2847 2848 nic_data->rx_rss_context = new_rx_rss_context; 2849 nic_data->rx_rss_context_exclusive = false; 2850 efx_set_default_rx_indir_table(efx); 2851 return 0; 2852 } 2853 2854 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx, 2855 const u32 *rx_indir_table, 2856 const u8 *key) 2857 { 2858 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2859 int rc; 2860 u32 new_rx_rss_context; 2861 2862 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID || 2863 !nic_data->rx_rss_context_exclusive) { 2864 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context, 2865 true, NULL); 2866 if (rc == -EOPNOTSUPP) 2867 return rc; 2868 else if (rc != 0) 2869 goto fail1; 2870 } else { 2871 new_rx_rss_context = nic_data->rx_rss_context; 2872 } 2873 2874 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context, 2875 rx_indir_table, key); 2876 if (rc != 0) 2877 goto fail2; 2878 2879 if (nic_data->rx_rss_context != new_rx_rss_context) 2880 efx_ef10_rx_free_indir_table(efx); 2881 nic_data->rx_rss_context = new_rx_rss_context; 2882 nic_data->rx_rss_context_exclusive = true; 2883 if (rx_indir_table != efx->rx_indir_table) 2884 memcpy(efx->rx_indir_table, rx_indir_table, 2885 sizeof(efx->rx_indir_table)); 2886 if (key != efx->rx_hash_key) 2887 memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size); 2888 2889 return 0; 2890 2891 fail2: 2892 if (new_rx_rss_context != nic_data->rx_rss_context) 2893 efx_ef10_free_rss_context(efx, new_rx_rss_context); 2894 fail1: 2895 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 2896 return rc; 2897 } 2898 2899 static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx) 2900 { 2901 struct efx_ef10_nic_data *nic_data = efx->nic_data; 2902 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN); 2903 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN); 2904 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN); 2905 size_t outlen; 2906 int rc, i; 2907 2908 BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != 2909 MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); 2910 2911 if (nic_data->rx_rss_context == 
EFX_EF10_RSS_CONTEXT_INVALID) 2912 return -ENOENT; 2913 2914 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, 2915 nic_data->rx_rss_context); 2916 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 2917 MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); 2918 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), 2919 tablebuf, sizeof(tablebuf), &outlen); 2920 if (rc != 0) 2921 return rc; 2922 2923 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN)) 2924 return -EIO; 2925 2926 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) 2927 efx->rx_indir_table[i] = MCDI_PTR(tablebuf, 2928 RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; 2929 2930 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, 2931 nic_data->rx_rss_context); 2932 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) != 2933 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); 2934 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), 2935 keybuf, sizeof(keybuf), &outlen); 2936 if (rc != 0) 2937 return rc; 2938 2939 if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN)) 2940 return -EIO; 2941 2942 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i) 2943 efx->rx_hash_key[i] = MCDI_PTR( 2944 keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i]; 2945 2946 return 0; 2947 } 2948 2949 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user, 2950 const u32 *rx_indir_table, 2951 const u8 *key) 2952 { 2953 int rc; 2954 2955 if (efx->rss_spread == 1) 2956 return 0; 2957 2958 if (!key) 2959 key = efx->rx_hash_key; 2960 2961 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key); 2962 2963 if (rc == -ENOBUFS && !user) { 2964 unsigned context_size; 2965 bool mismatch = false; 2966 size_t i; 2967 2968 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch; 2969 i++) 2970 mismatch = rx_indir_table[i] != 2971 ethtool_rxfh_indir_default(i, efx->rss_spread); 2972 2973 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size); 2974 if (rc == 0) { 2975 if (context_size != efx->rss_spread) 2976 netif_warn(efx, probe, efx->net_dev, 2977 "Could not allocate an exclusive RSS" 2978 " context; allocated a shared one of" 2979 " different size." 
2980 " Wanted %u, got %u.\n", 2981 efx->rss_spread, context_size); 2982 else if (mismatch) 2983 netif_warn(efx, probe, efx->net_dev, 2984 "Could not allocate an exclusive RSS" 2985 " context; allocated a shared one but" 2986 " could not apply custom" 2987 " indirection.\n"); 2988 else 2989 netif_info(efx, probe, efx->net_dev, 2990 "Could not allocate an exclusive RSS" 2991 " context; allocated a shared one.\n"); 2992 } 2993 } 2994 return rc; 2995 } 2996 2997 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user, 2998 const u32 *rx_indir_table 2999 __attribute__ ((unused)), 3000 const u8 *key 3001 __attribute__ ((unused))) 3002 { 3003 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3004 3005 if (user) 3006 return -EOPNOTSUPP; 3007 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID) 3008 return 0; 3009 return efx_ef10_rx_push_shared_rss_config(efx, NULL); 3010 } 3011 3012 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue) 3013 { 3014 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf, 3015 (rx_queue->ptr_mask + 1) * 3016 sizeof(efx_qword_t), 3017 GFP_KERNEL); 3018 } 3019 3020 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue) 3021 { 3022 MCDI_DECLARE_BUF(inbuf, 3023 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / 3024 EFX_BUF_SIZE)); 3025 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 3026 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; 3027 struct efx_nic *efx = rx_queue->efx; 3028 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3029 size_t inlen; 3030 dma_addr_t dma_addr; 3031 int rc; 3032 int i; 3033 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); 3034 3035 rx_queue->scatter_n = 0; 3036 rx_queue->scatter_len = 0; 3037 3038 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); 3039 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); 3040 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue)); 3041 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE, 3042 efx_rx_queue_index(rx_queue)); 3043 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS, 3044 INIT_RXQ_IN_FLAG_PREFIX, 1, 3045 INIT_RXQ_IN_FLAG_TIMESTAMP, 1); 3046 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); 3047 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id); 3048 3049 dma_addr = rx_queue->rxd.buf.dma_addr; 3050 3051 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. 
%zu entries (%llx)\n", 3052 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr); 3053 3054 for (i = 0; i < entries; ++i) { 3055 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr); 3056 dma_addr += EFX_BUF_SIZE; 3057 } 3058 3059 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); 3060 3061 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, 3062 NULL, 0, NULL); 3063 if (rc) 3064 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", 3065 efx_rx_queue_index(rx_queue)); 3066 } 3067 3068 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue) 3069 { 3070 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN); 3071 MCDI_DECLARE_BUF_ERR(outbuf); 3072 struct efx_nic *efx = rx_queue->efx; 3073 size_t outlen; 3074 int rc; 3075 3076 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE, 3077 efx_rx_queue_index(rx_queue)); 3078 3079 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf), 3080 outbuf, sizeof(outbuf), &outlen); 3081 3082 if (rc && rc != -EALREADY) 3083 goto fail; 3084 3085 return; 3086 3087 fail: 3088 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN, 3089 outbuf, outlen, rc); 3090 } 3091 3092 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue) 3093 { 3094 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf); 3095 } 3096 3097 /* This creates an entry in the RX descriptor queue */ 3098 static inline void 3099 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 3100 { 3101 struct efx_rx_buffer *rx_buf; 3102 efx_qword_t *rxd; 3103 3104 rxd = efx_rx_desc(rx_queue, index); 3105 rx_buf = efx_rx_buffer(rx_queue, index); 3106 EFX_POPULATE_QWORD_2(*rxd, 3107 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len, 3108 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 3109 } 3110 3111 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue) 3112 { 3113 struct efx_nic *efx = rx_queue->efx; 3114 unsigned int write_count; 3115 efx_dword_t reg; 3116 3117 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */ 3118 write_count = rx_queue->added_count & ~7; 3119 if (rx_queue->notified_count == write_count) 3120 return; 3121 3122 do 3123 efx_ef10_build_rx_desc( 3124 rx_queue, 3125 rx_queue->notified_count & rx_queue->ptr_mask); 3126 while (++rx_queue->notified_count != write_count); 3127 3128 wmb(); 3129 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR, 3130 write_count & rx_queue->ptr_mask); 3131 efx_writed_page(efx, ®, ER_DZ_RX_DESC_UPD, 3132 efx_rx_queue_index(rx_queue)); 3133 } 3134 3135 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete; 3136 3137 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue) 3138 { 3139 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 3140 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3141 efx_qword_t event; 3142 3143 EFX_POPULATE_QWORD_2(event, 3144 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3145 ESF_DZ_EV_DATA, EFX_EF10_REFILL); 3146 3147 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3148 3149 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3150 * already swapped the data to little-endian order. 
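* (A plain memcpy of the already-little-endian qword is used instead.)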
3151 */ 3152 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3153 sizeof(efx_qword_t)); 3154 3155 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT, 3156 inbuf, sizeof(inbuf), 0, 3157 efx_ef10_rx_defer_refill_complete, 0); 3158 } 3159 3160 static void 3161 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie, 3162 int rc, efx_dword_t *outbuf, 3163 size_t outlen_actual) 3164 { 3165 /* nothing to do */ 3166 } 3167 3168 static int efx_ef10_ev_probe(struct efx_channel *channel) 3169 { 3170 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, 3171 (channel->eventq_mask + 1) * 3172 sizeof(efx_qword_t), 3173 GFP_KERNEL); 3174 } 3175 3176 static void efx_ef10_ev_fini(struct efx_channel *channel) 3177 { 3178 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN); 3179 MCDI_DECLARE_BUF_ERR(outbuf); 3180 struct efx_nic *efx = channel->efx; 3181 size_t outlen; 3182 int rc; 3183 3184 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel); 3185 3186 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf), 3187 outbuf, sizeof(outbuf), &outlen); 3188 3189 if (rc && rc != -EALREADY) 3190 goto fail; 3191 3192 return; 3193 3194 fail: 3195 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN, 3196 outbuf, outlen, rc); 3197 } 3198 3199 static int efx_ef10_ev_init(struct efx_channel *channel) 3200 { 3201 MCDI_DECLARE_BUF(inbuf, 3202 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 / 3203 EFX_BUF_SIZE)); 3204 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN); 3205 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE; 3206 struct efx_nic *efx = channel->efx; 3207 struct efx_ef10_nic_data *nic_data; 3208 size_t inlen, outlen; 3209 unsigned int enabled, implemented; 3210 dma_addr_t dma_addr; 3211 int rc; 3212 int i; 3213 3214 nic_data = efx->nic_data; 3215 3216 /* Fill event queue with all ones (i.e. empty events) */ 3217 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 3218 3219 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1); 3220 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel); 3221 /* INIT_EVQ expects index in vector table, not absolute */ 3222 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel); 3223 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE, 3224 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS); 3225 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0); 3226 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0); 3227 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE, 3228 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS); 3229 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0); 3230 3231 if (nic_data->datapath_caps2 & 3232 1 << MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN) { 3233 /* Use the new generic approach to specifying event queue 3234 * configuration, requesting lower latency or higher throughput. 3235 * The options that actually get used appear in the output. 
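* (We request TYPE_AUTO and let the firmware choose; the flags it actually selected are logged from the response further down.)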
3236 */ 3237 MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS, 3238 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1, 3239 INIT_EVQ_V2_IN_FLAG_TYPE, 3240 MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO); 3241 } else { 3242 bool cut_thru = !(nic_data->datapath_caps & 3243 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN); 3244 3245 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS, 3246 INIT_EVQ_IN_FLAG_INTERRUPTING, 1, 3247 INIT_EVQ_IN_FLAG_RX_MERGE, 1, 3248 INIT_EVQ_IN_FLAG_TX_MERGE, 1, 3249 INIT_EVQ_IN_FLAG_CUT_THRU, cut_thru); 3250 } 3251 3252 dma_addr = channel->eventq.buf.dma_addr; 3253 for (i = 0; i < entries; ++i) { 3254 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr); 3255 dma_addr += EFX_BUF_SIZE; 3256 } 3257 3258 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries); 3259 3260 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen, 3261 outbuf, sizeof(outbuf), &outlen); 3262 3263 if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN) 3264 netif_dbg(efx, drv, efx->net_dev, 3265 "Channel %d using event queue flags %08x\n", 3266 channel->channel, 3267 MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS)); 3268 3269 /* IRQ return is ignored */ 3270 if (channel->channel || rc) 3271 return rc; 3272 3273 /* Successfully created event queue on channel 0 */ 3274 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled); 3275 if (rc == -ENOSYS) { 3276 /* GET_WORKAROUNDS was implemented before this workaround, 3277 * thus it must be unavailable in this firmware. 3278 */ 3279 nic_data->workaround_26807 = false; 3280 rc = 0; 3281 } else if (rc) { 3282 goto fail; 3283 } else { 3284 nic_data->workaround_26807 = 3285 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807); 3286 3287 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 && 3288 !nic_data->workaround_26807) { 3289 unsigned int flags; 3290 3291 rc = efx_mcdi_set_workaround(efx, 3292 MC_CMD_WORKAROUND_BUG26807, 3293 true, &flags); 3294 3295 if (!rc) { 3296 if (flags & 3297 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) { 3298 netif_info(efx, drv, efx->net_dev, 3299 "other functions on NIC have been reset\n"); 3300 3301 /* With MCFW v4.6.x and earlier, the 3302 * boot count will have incremented, 3303 * so re-read the warm_boot_count 3304 * value now to ensure this function 3305 * doesn't think it has changed next 3306 * time it checks. 
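* (Otherwise efx_ef10_mcdi_poll_reboot() would report a spurious reboot on its next poll.)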
3307 */ 3308 rc = efx_ef10_get_warm_boot_count(efx); 3309 if (rc >= 0) { 3310 nic_data->warm_boot_count = rc; 3311 rc = 0; 3312 } 3313 } 3314 nic_data->workaround_26807 = true; 3315 } else if (rc == -EPERM) { 3316 rc = 0; 3317 } 3318 } 3319 } 3320 3321 if (!rc) 3322 return 0; 3323 3324 fail: 3325 efx_ef10_ev_fini(channel); 3326 return rc; 3327 } 3328 3329 static void efx_ef10_ev_remove(struct efx_channel *channel) 3330 { 3331 efx_nic_free_buffer(channel->efx, &channel->eventq.buf); 3332 } 3333 3334 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue, 3335 unsigned int rx_queue_label) 3336 { 3337 struct efx_nic *efx = rx_queue->efx; 3338 3339 netif_info(efx, hw, efx->net_dev, 3340 "rx event arrived on queue %d labeled as queue %u\n", 3341 efx_rx_queue_index(rx_queue), rx_queue_label); 3342 3343 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3344 } 3345 3346 static void 3347 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue, 3348 unsigned int actual, unsigned int expected) 3349 { 3350 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask; 3351 struct efx_nic *efx = rx_queue->efx; 3352 3353 netif_info(efx, hw, efx->net_dev, 3354 "dropped %d events (index=%d expected=%d)\n", 3355 dropped, actual, expected); 3356 3357 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 3358 } 3359 3360 /* partially received RX was aborted. clean up. */ 3361 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue) 3362 { 3363 unsigned int rx_desc_ptr; 3364 3365 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev, 3366 "scattered RX aborted (dropping %u buffers)\n", 3367 rx_queue->scatter_n); 3368 3369 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 3370 3371 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n, 3372 0, EFX_RX_PKT_DISCARD); 3373 3374 rx_queue->removed_count += rx_queue->scatter_n; 3375 rx_queue->scatter_n = 0; 3376 rx_queue->scatter_len = 0; 3377 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc; 3378 } 3379 3380 static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, 3381 unsigned int n_packets, 3382 unsigned int rx_encap_hdr, 3383 unsigned int rx_l3_class, 3384 unsigned int rx_l4_class, 3385 const efx_qword_t *event) 3386 { 3387 struct efx_nic *efx = channel->efx; 3388 bool handled = false; 3389 3390 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { 3391 if (!(efx->net_dev->features & NETIF_F_RXALL)) { 3392 if (!efx->loopback_selftest) 3393 channel->n_rx_eth_crc_err += n_packets; 3394 return EFX_RX_PKT_DISCARD; 3395 } 3396 handled = true; 3397 } 3398 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { 3399 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3400 rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3401 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3402 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3403 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3404 netdev_WARN(efx->net_dev, 3405 "invalid class for RX_IPCKSUM_ERR: event=" 3406 EFX_QWORD_FMT "\n", 3407 EFX_QWORD_VAL(*event)); 3408 if (!efx->loopback_selftest) 3409 *(rx_encap_hdr ? 
3410 &channel->n_rx_outer_ip_hdr_chksum_err : 3411 &channel->n_rx_ip_hdr_chksum_err) += n_packets; 3412 return 0; 3413 } 3414 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) { 3415 if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && 3416 ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3417 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3418 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3419 rx_l4_class != ESE_FZ_L4_CLASS_UDP)))) 3420 netdev_WARN(efx->net_dev, 3421 "invalid class for RX_TCPUDP_CKSUM_ERR: event=" 3422 EFX_QWORD_FMT "\n", 3423 EFX_QWORD_VAL(*event)); 3424 if (!efx->loopback_selftest) 3425 *(rx_encap_hdr ? 3426 &channel->n_rx_outer_tcp_udp_chksum_err : 3427 &channel->n_rx_tcp_udp_chksum_err) += n_packets; 3428 return 0; 3429 } 3430 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) { 3431 if (unlikely(!rx_encap_hdr)) 3432 netdev_WARN(efx->net_dev, 3433 "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event=" 3434 EFX_QWORD_FMT "\n", 3435 EFX_QWORD_VAL(*event)); 3436 else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3437 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG && 3438 rx_l3_class != ESE_DZ_L3_CLASS_IP6 && 3439 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG)) 3440 netdev_WARN(efx->net_dev, 3441 "invalid class for RX_IP_INNER_CHKSUM_ERR: event=" 3442 EFX_QWORD_FMT "\n", 3443 EFX_QWORD_VAL(*event)); 3444 if (!efx->loopback_selftest) 3445 channel->n_rx_inner_ip_hdr_chksum_err += n_packets; 3446 return 0; 3447 } 3448 if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) { 3449 if (unlikely(!rx_encap_hdr)) 3450 netdev_WARN(efx->net_dev, 3451 "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3452 EFX_QWORD_FMT "\n", 3453 EFX_QWORD_VAL(*event)); 3454 else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 && 3455 rx_l3_class != ESE_DZ_L3_CLASS_IP6) || 3456 (rx_l4_class != ESE_FZ_L4_CLASS_TCP && 3457 rx_l4_class != ESE_FZ_L4_CLASS_UDP))) 3458 netdev_WARN(efx->net_dev, 3459 "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event=" 3460 EFX_QWORD_FMT "\n", 3461 EFX_QWORD_VAL(*event)); 3462 if (!efx->loopback_selftest) 3463 channel->n_rx_inner_tcp_udp_chksum_err += n_packets; 3464 return 0; 3465 } 3466 3467 WARN_ON(!handled); /* No error bits were recognised */ 3468 return 0; 3469 } 3470 3471 static int efx_ef10_handle_rx_event(struct efx_channel *channel, 3472 const efx_qword_t *event) 3473 { 3474 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label; 3475 unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr; 3476 unsigned int n_descs, n_packets, i; 3477 struct efx_nic *efx = channel->efx; 3478 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3479 struct efx_rx_queue *rx_queue; 3480 efx_qword_t errors; 3481 bool rx_cont; 3482 u16 flags = 0; 3483 3484 if (unlikely(READ_ONCE(efx->reset_pending))) 3485 return 0; 3486 3487 /* Basic packet information */ 3488 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES); 3489 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS); 3490 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL); 3491 rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS); 3492 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_FZ_RX_L4_CLASS); 3493 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT); 3494 rx_encap_hdr = 3495 nic_data->datapath_caps & 3496 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ? 
3497 EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) : 3498 ESE_EZ_ENCAP_HDR_NONE; 3499 3500 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT)) 3501 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event=" 3502 EFX_QWORD_FMT "\n", 3503 EFX_QWORD_VAL(*event)); 3504 3505 rx_queue = efx_channel_get_rx_queue(channel); 3506 3507 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue))) 3508 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label); 3509 3510 n_descs = ((next_ptr_lbits - rx_queue->removed_count) & 3511 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3512 3513 if (n_descs != rx_queue->scatter_n + 1) { 3514 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3515 3516 /* detect rx abort */ 3517 if (unlikely(n_descs == rx_queue->scatter_n)) { 3518 if (rx_queue->scatter_n == 0 || rx_bytes != 0) 3519 netdev_WARN(efx->net_dev, 3520 "invalid RX abort: scatter_n=%u event=" 3521 EFX_QWORD_FMT "\n", 3522 rx_queue->scatter_n, 3523 EFX_QWORD_VAL(*event)); 3524 efx_ef10_handle_rx_abort(rx_queue); 3525 return 0; 3526 } 3527 3528 /* Check that RX completion merging is valid, i.e. 3529 * the current firmware supports it and this is a 3530 * non-scattered packet. 3531 */ 3532 if (!(nic_data->datapath_caps & 3533 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) || 3534 rx_queue->scatter_n != 0 || rx_cont) { 3535 efx_ef10_handle_rx_bad_lbits( 3536 rx_queue, next_ptr_lbits, 3537 (rx_queue->removed_count + 3538 rx_queue->scatter_n + 1) & 3539 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1)); 3540 return 0; 3541 } 3542 3543 /* Merged completion for multiple non-scattered packets */ 3544 rx_queue->scatter_n = 1; 3545 rx_queue->scatter_len = 0; 3546 n_packets = n_descs; 3547 ++channel->n_rx_merge_events; 3548 channel->n_rx_merge_packets += n_packets; 3549 flags |= EFX_RX_PKT_PREFIX_LEN; 3550 } else { 3551 ++rx_queue->scatter_n; 3552 rx_queue->scatter_len += rx_bytes; 3553 if (rx_cont) 3554 return 0; 3555 n_packets = 1; 3556 } 3557 3558 EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1, 3559 ESF_DZ_RX_IPCKSUM_ERR, 1, 3560 ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1, 3561 ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1, 3562 ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1); 3563 EFX_AND_QWORD(errors, *event, errors); 3564 if (unlikely(!EFX_QWORD_IS_ZERO(errors))) { 3565 flags |= efx_ef10_handle_rx_event_errors(channel, n_packets, 3566 rx_encap_hdr, 3567 rx_l3_class, rx_l4_class, 3568 event); 3569 } else { 3570 bool tcpudp = rx_l4_class == ESE_FZ_L4_CLASS_TCP || 3571 rx_l4_class == ESE_FZ_L4_CLASS_UDP; 3572 3573 switch (rx_encap_hdr) { 3574 case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */ 3575 flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */ 3576 if (tcpudp) 3577 flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */ 3578 break; 3579 case ESE_EZ_ENCAP_HDR_GRE: 3580 case ESE_EZ_ENCAP_HDR_NONE: 3581 if (tcpudp) 3582 flags |= EFX_RX_PKT_CSUMMED; 3583 break; 3584 default: 3585 netdev_WARN(efx->net_dev, 3586 "unknown encapsulation type: event=" 3587 EFX_QWORD_FMT "\n", 3588 EFX_QWORD_VAL(*event)); 3589 } 3590 } 3591 3592 if (rx_l4_class == ESE_FZ_L4_CLASS_TCP) 3593 flags |= EFX_RX_PKT_TCP; 3594 3595 channel->irq_mod_score += 2 * n_packets; 3596 3597 /* Handle received packet(s) */ 3598 for (i = 0; i < n_packets; i++) { 3599 efx_rx_packet(rx_queue, 3600 rx_queue->removed_count & rx_queue->ptr_mask, 3601 rx_queue->scatter_n, rx_queue->scatter_len, 3602 flags); 3603 rx_queue->removed_count += rx_queue->scatter_n; 3604 } 3605 3606 rx_queue->scatter_n = 0; 3607 rx_queue->scatter_len = 0; 3608 3609 return n_packets; 3610 } 3611 3612 static u32 
efx_ef10_extract_event_ts(efx_qword_t *event) 3613 { 3614 u32 tstamp; 3615 3616 tstamp = EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI); 3617 tstamp <<= 16; 3618 tstamp |= EFX_QWORD_FIELD(*event, TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO); 3619 3620 return tstamp; 3621 } 3622 3623 static void 3624 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 3625 { 3626 struct efx_nic *efx = channel->efx; 3627 struct efx_tx_queue *tx_queue; 3628 unsigned int tx_ev_desc_ptr; 3629 unsigned int tx_ev_q_label; 3630 unsigned int tx_ev_type; 3631 u64 ts_part; 3632 3633 if (unlikely(READ_ONCE(efx->reset_pending))) 3634 return; 3635 3636 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) 3637 return; 3638 3639 /* Get the transmit queue */ 3640 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL); 3641 tx_queue = efx_channel_get_tx_queue(channel, 3642 tx_ev_q_label % EFX_TXQ_TYPES); 3643 3644 if (!tx_queue->timestamping) { 3645 /* Transmit completion */ 3646 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX); 3647 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask); 3648 return; 3649 } 3650 3651 /* Transmit timestamps are only available for 8XXX series. They result 3652 * in three events per packet. These occur in order, and are: 3653 * - the normal completion event 3654 * - the low part of the timestamp 3655 * - the high part of the timestamp 3656 * 3657 * Each part of the timestamp is itself split across two 16 bit 3658 * fields in the event. 3659 */ 3660 tx_ev_type = EFX_QWORD_FIELD(*event, ESF_EZ_TX_SOFT1); 3661 3662 switch (tx_ev_type) { 3663 case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION: 3664 /* In case of Queue flush or FLR, we might have received 3665 * the previous TX completion event but not the Timestamp 3666 * events. 3667 */ 3668 if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask) 3669 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); 3670 3671 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, 3672 ESF_DZ_TX_DESCR_INDX); 3673 tx_queue->completed_desc_ptr = 3674 tx_ev_desc_ptr & tx_queue->ptr_mask; 3675 break; 3676 3677 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO: 3678 ts_part = efx_ef10_extract_event_ts(event); 3679 tx_queue->completed_timestamp_minor = ts_part; 3680 break; 3681 3682 case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI: 3683 ts_part = efx_ef10_extract_event_ts(event); 3684 tx_queue->completed_timestamp_major = ts_part; 3685 3686 efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr); 3687 tx_queue->completed_desc_ptr = tx_queue->ptr_mask; 3688 break; 3689 3690 default: 3691 netif_err(efx, hw, efx->net_dev, 3692 "channel %d unknown tx event type %d (data " 3693 EFX_QWORD_FMT ")\n", 3694 channel->channel, tx_ev_type, 3695 EFX_QWORD_VAL(*event)); 3696 break; 3697 } 3698 } 3699 3700 static void 3701 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 3702 { 3703 struct efx_nic *efx = channel->efx; 3704 int subcode; 3705 3706 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE); 3707 3708 switch (subcode) { 3709 case ESE_DZ_DRV_TIMER_EV: 3710 case ESE_DZ_DRV_WAKE_UP_EV: 3711 break; 3712 case ESE_DZ_DRV_START_UP_EV: 3713 /* event queue init complete. ok. 
*/ 3714 break; 3715 default: 3716 netif_err(efx, hw, efx->net_dev, 3717 "channel %d unknown driver event type %d" 3718 " (data " EFX_QWORD_FMT ")\n", 3719 channel->channel, subcode, 3720 EFX_QWORD_VAL(*event)); 3721 3722 } 3723 } 3724 3725 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel, 3726 efx_qword_t *event) 3727 { 3728 struct efx_nic *efx = channel->efx; 3729 u32 subcode; 3730 3731 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0); 3732 3733 switch (subcode) { 3734 case EFX_EF10_TEST: 3735 channel->event_test_cpu = raw_smp_processor_id(); 3736 break; 3737 case EFX_EF10_REFILL: 3738 /* The queue must be empty, so we won't receive any rx 3739 * events, so efx_process_channel() won't refill the 3740 * queue. Refill it here 3741 */ 3742 efx_fast_push_rx_descriptors(&channel->rx_queue, true); 3743 break; 3744 default: 3745 netif_err(efx, hw, efx->net_dev, 3746 "channel %d unknown driver event type %u" 3747 " (data " EFX_QWORD_FMT ")\n", 3748 channel->channel, (unsigned) subcode, 3749 EFX_QWORD_VAL(*event)); 3750 } 3751 } 3752 3753 static int efx_ef10_ev_process(struct efx_channel *channel, int quota) 3754 { 3755 struct efx_nic *efx = channel->efx; 3756 efx_qword_t event, *p_event; 3757 unsigned int read_ptr; 3758 int ev_code; 3759 int spent = 0; 3760 3761 if (quota <= 0) 3762 return spent; 3763 3764 read_ptr = channel->eventq_read_ptr; 3765 3766 for (;;) { 3767 p_event = efx_event(channel, read_ptr); 3768 event = *p_event; 3769 3770 if (!efx_event_present(&event)) 3771 break; 3772 3773 EFX_SET_QWORD(*p_event); 3774 3775 ++read_ptr; 3776 3777 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE); 3778 3779 netif_vdbg(efx, drv, efx->net_dev, 3780 "processing event on %d " EFX_QWORD_FMT "\n", 3781 channel->channel, EFX_QWORD_VAL(event)); 3782 3783 switch (ev_code) { 3784 case ESE_DZ_EV_CODE_MCDI_EV: 3785 efx_mcdi_process_event(channel, &event); 3786 break; 3787 case ESE_DZ_EV_CODE_RX_EV: 3788 spent += efx_ef10_handle_rx_event(channel, &event); 3789 if (spent >= quota) { 3790 /* XXX can we split a merged event to 3791 * avoid going over-quota? 
3792 */ 3793 spent = quota; 3794 goto out; 3795 } 3796 break; 3797 case ESE_DZ_EV_CODE_TX_EV: 3798 efx_ef10_handle_tx_event(channel, &event); 3799 break; 3800 case ESE_DZ_EV_CODE_DRIVER_EV: 3801 efx_ef10_handle_driver_event(channel, &event); 3802 if (++spent == quota) 3803 goto out; 3804 break; 3805 case EFX_EF10_DRVGEN_EV: 3806 efx_ef10_handle_driver_generated_event(channel, &event); 3807 break; 3808 default: 3809 netif_err(efx, hw, efx->net_dev, 3810 "channel %d unknown event type %d" 3811 " (data " EFX_QWORD_FMT ")\n", 3812 channel->channel, ev_code, 3813 EFX_QWORD_VAL(event)); 3814 } 3815 } 3816 3817 out: 3818 channel->eventq_read_ptr = read_ptr; 3819 return spent; 3820 } 3821 3822 static void efx_ef10_ev_read_ack(struct efx_channel *channel) 3823 { 3824 struct efx_nic *efx = channel->efx; 3825 efx_dword_t rptr; 3826 3827 if (EFX_EF10_WORKAROUND_35388(efx)) { 3828 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE < 3829 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH)); 3830 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE > 3831 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH)); 3832 3833 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3834 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH, 3835 ERF_DD_EVQ_IND_RPTR, 3836 (channel->eventq_read_ptr & 3837 channel->eventq_mask) >> 3838 ERF_DD_EVQ_IND_RPTR_WIDTH); 3839 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3840 channel->channel); 3841 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS, 3842 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW, 3843 ERF_DD_EVQ_IND_RPTR, 3844 channel->eventq_read_ptr & 3845 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1)); 3846 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT, 3847 channel->channel); 3848 } else { 3849 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR, 3850 channel->eventq_read_ptr & 3851 channel->eventq_mask); 3852 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel); 3853 } 3854 } 3855 3856 static void efx_ef10_ev_test_generate(struct efx_channel *channel) 3857 { 3858 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); 3859 struct efx_nic *efx = channel->efx; 3860 efx_qword_t event; 3861 int rc; 3862 3863 EFX_POPULATE_QWORD_2(event, 3864 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV, 3865 ESF_DZ_EV_DATA, EFX_EF10_TEST); 3866 3867 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); 3868 3869 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has 3870 * already swapped the data to little-endian order. 3871 */ 3872 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], 3873 sizeof(efx_qword_t)); 3874 3875 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), 3876 NULL, 0, NULL); 3877 if (rc != 0) 3878 goto fail; 3879 3880 return; 3881 3882 fail: 3883 WARN_ON(true); 3884 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); 3885 } 3886 3887 void efx_ef10_handle_drain_event(struct efx_nic *efx) 3888 { 3889 if (atomic_dec_and_test(&efx->active_queues)) 3890 wake_up(&efx->flush_wq); 3891 3892 WARN_ON(atomic_read(&efx->active_queues) < 0); 3893 } 3894 3895 static int efx_ef10_fini_dmaq(struct efx_nic *efx) 3896 { 3897 struct efx_ef10_nic_data *nic_data = efx->nic_data; 3898 struct efx_channel *channel; 3899 struct efx_tx_queue *tx_queue; 3900 struct efx_rx_queue *rx_queue; 3901 int pending; 3902 3903 /* If the MC has just rebooted, the TX/RX queues will have already been 3904 * torn down, but efx->active_queues needs to be set to zero. 
3905 */ 3906 if (nic_data->must_realloc_vis) { 3907 atomic_set(&efx->active_queues, 0); 3908 return 0; 3909 } 3910 3911 /* Do not attempt to write to the NIC during EEH recovery */ 3912 if (efx->state != STATE_RECOVERY) { 3913 efx_for_each_channel(channel, efx) { 3914 efx_for_each_channel_rx_queue(rx_queue, channel) 3915 efx_ef10_rx_fini(rx_queue); 3916 efx_for_each_channel_tx_queue(tx_queue, channel) 3917 efx_ef10_tx_fini(tx_queue); 3918 } 3919 3920 wait_event_timeout(efx->flush_wq, 3921 atomic_read(&efx->active_queues) == 0, 3922 msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); 3923 pending = atomic_read(&efx->active_queues); 3924 if (pending) { 3925 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", 3926 pending); 3927 return -ETIMEDOUT; 3928 } 3929 } 3930 3931 return 0; 3932 } 3933 3934 static void efx_ef10_prepare_flr(struct efx_nic *efx) 3935 { 3936 atomic_set(&efx->active_queues, 0); 3937 } 3938 3939 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, 3940 const struct efx_filter_spec *right) 3941 { 3942 if ((left->match_flags ^ right->match_flags) | 3943 ((left->flags ^ right->flags) & 3944 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) 3945 return false; 3946 3947 return memcmp(&left->outer_vid, &right->outer_vid, 3948 sizeof(struct efx_filter_spec) - 3949 offsetof(struct efx_filter_spec, outer_vid)) == 0; 3950 } 3951 3952 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) 3953 { 3954 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); 3955 return jhash2((const u32 *)&spec->outer_vid, 3956 (sizeof(struct efx_filter_spec) - 3957 offsetof(struct efx_filter_spec, outer_vid)) / 4, 3958 0); 3959 /* XXX should we randomise the initval? */ 3960 } 3961 3962 /* Decide whether a filter should be exclusive or else should allow 3963 * delivery to additional recipients. Currently we decide that 3964 * filters for specific local unicast MAC and IP addresses are 3965 * exclusive. 
3966 */ 3967 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec) 3968 { 3969 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC && 3970 !is_multicast_ether_addr(spec->loc_mac)) 3971 return true; 3972 3973 if ((spec->match_flags & 3974 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) == 3975 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) { 3976 if (spec->ether_type == htons(ETH_P_IP) && 3977 !ipv4_is_multicast(spec->loc_host[0])) 3978 return true; 3979 if (spec->ether_type == htons(ETH_P_IPV6) && 3980 ((const u8 *)spec->loc_host)[0] != 0xff) 3981 return true; 3982 } 3983 3984 return false; 3985 } 3986 3987 static struct efx_filter_spec * 3988 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table, 3989 unsigned int filter_idx) 3990 { 3991 return (struct efx_filter_spec *)(table->entry[filter_idx].spec & 3992 ~EFX_EF10_FILTER_FLAGS); 3993 } 3994 3995 static unsigned int 3996 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table, 3997 unsigned int filter_idx) 3998 { 3999 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS; 4000 } 4001 4002 static void 4003 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table, 4004 unsigned int filter_idx, 4005 const struct efx_filter_spec *spec, 4006 unsigned int flags) 4007 { 4008 table->entry[filter_idx].spec = (unsigned long)spec | flags; 4009 } 4010 4011 static void 4012 efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx, 4013 const struct efx_filter_spec *spec, 4014 efx_dword_t *inbuf) 4015 { 4016 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 4017 u32 match_fields = 0, uc_match, mc_match; 4018 4019 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4020 efx_ef10_filter_is_exclusive(spec) ? 4021 MC_CMD_FILTER_OP_IN_OP_INSERT : 4022 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE); 4023 4024 /* Convert match flags and values. Unlike almost 4025 * everything else in MCDI, these fields are in 4026 * network byte order. 4027 */ 4028 #define COPY_VALUE(value, mcdi_field) \ 4029 do { \ 4030 match_fields |= \ 4031 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ 4032 mcdi_field ## _LBN; \ 4033 BUILD_BUG_ON( \ 4034 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \ 4035 sizeof(value)); \ 4036 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \ 4037 &value, sizeof(value)); \ 4038 } while (0) 4039 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \ 4040 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \ 4041 COPY_VALUE(spec->gen_field, mcdi_field); \ 4042 } 4043 /* Handle encap filters first. They will always be mismatch 4044 * (unknown UC or MC) filters 4045 */ 4046 if (encap_type) { 4047 /* ether_type and outer_ip_proto need to be variables 4048 * because COPY_VALUE wants to memcpy them 4049 */ 4050 __be16 ether_type = 4051 htons(encap_type & EFX_ENCAP_FLAG_IPV6 ? 4052 ETH_P_IPV6 : ETH_P_IP); 4053 u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE; 4054 u8 outer_ip_proto; 4055 4056 switch (encap_type & EFX_ENCAP_TYPES_MASK) { 4057 case EFX_ENCAP_TYPE_VXLAN: 4058 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN; 4059 /* fallthrough */ 4060 case EFX_ENCAP_TYPE_GENEVE: 4061 COPY_VALUE(ether_type, ETHER_TYPE); 4062 outer_ip_proto = IPPROTO_UDP; 4063 COPY_VALUE(outer_ip_proto, IP_PROTO); 4064 /* We always need to set the type field, even 4065 * though we're not matching on the TNI. 
4066 */ 4067 MCDI_POPULATE_DWORD_1(inbuf, 4068 FILTER_OP_EXT_IN_VNI_OR_VSID, 4069 FILTER_OP_EXT_IN_VNI_TYPE, 4070 vni_type); 4071 break; 4072 case EFX_ENCAP_TYPE_NVGRE: 4073 COPY_VALUE(ether_type, ETHER_TYPE); 4074 outer_ip_proto = IPPROTO_GRE; 4075 COPY_VALUE(outer_ip_proto, IP_PROTO); 4076 break; 4077 default: 4078 WARN_ON(1); 4079 } 4080 4081 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 4082 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 4083 } else { 4084 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 4085 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 4086 } 4087 4088 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) 4089 match_fields |= 4090 is_multicast_ether_addr(spec->loc_mac) ? 4091 1 << mc_match : 4092 1 << uc_match; 4093 COPY_FIELD(REM_HOST, rem_host, SRC_IP); 4094 COPY_FIELD(LOC_HOST, loc_host, DST_IP); 4095 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC); 4096 COPY_FIELD(REM_PORT, rem_port, SRC_PORT); 4097 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC); 4098 COPY_FIELD(LOC_PORT, loc_port, DST_PORT); 4099 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE); 4100 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN); 4101 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN); 4102 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO); 4103 #undef COPY_FIELD 4104 #undef COPY_VALUE 4105 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS, 4106 match_fields); 4107 } 4108 4109 static void efx_ef10_filter_push_prep(struct efx_nic *efx, 4110 const struct efx_filter_spec *spec, 4111 efx_dword_t *inbuf, u64 handle, 4112 bool replacing) 4113 { 4114 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4115 u32 flags = spec->flags; 4116 4117 memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN); 4118 4119 /* Remove RSS flag if we don't have an RSS context. */ 4120 if (flags & EFX_FILTER_FLAG_RX_RSS && 4121 spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && 4122 nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) 4123 flags &= ~EFX_FILTER_FLAG_RX_RSS; 4124 4125 if (replacing) { 4126 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4127 MC_CMD_FILTER_OP_IN_OP_REPLACE); 4128 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle); 4129 } else { 4130 efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf); 4131 } 4132 4133 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id); 4134 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST, 4135 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4136 MC_CMD_FILTER_OP_IN_RX_DEST_DROP : 4137 MC_CMD_FILTER_OP_IN_RX_DEST_HOST); 4138 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0); 4139 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST, 4140 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT); 4141 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, 4142 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 4143 0 : spec->dmaq_id); 4144 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, 4145 (flags & EFX_FILTER_FLAG_RX_RSS) ? 4146 MC_CMD_FILTER_OP_IN_RX_MODE_RSS : 4147 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); 4148 if (flags & EFX_FILTER_FLAG_RX_RSS) 4149 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, 4150 spec->rss_context != 4151 EFX_FILTER_RSS_CONTEXT_DEFAULT ? 
4152 spec->rss_context : nic_data->rx_rss_context); 4153 } 4154 4155 static int efx_ef10_filter_push(struct efx_nic *efx, 4156 const struct efx_filter_spec *spec, 4157 u64 *handle, bool replacing) 4158 { 4159 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4160 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); 4161 int rc; 4162 4163 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing); 4164 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 4165 outbuf, sizeof(outbuf), NULL); 4166 if (rc == 0) 4167 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 4168 if (rc == -ENOSPC) 4169 rc = -EBUSY; /* to match efx_farch_filter_insert() */ 4170 return rc; 4171 } 4172 4173 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) 4174 { 4175 enum efx_encap_type encap_type = efx_filter_get_encap_type(spec); 4176 unsigned int match_flags = spec->match_flags; 4177 unsigned int uc_match, mc_match; 4178 u32 mcdi_flags = 0; 4179 4180 #define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \ 4181 unsigned int old_match_flags = match_flags; \ 4182 match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ 4183 if (match_flags != old_match_flags) \ 4184 mcdi_flags |= \ 4185 (1 << ((encap) ? \ 4186 MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \ 4187 mcdi_field ## _LBN : \ 4188 MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\ 4189 mcdi_field ## _LBN)); \ 4190 } 4191 /* inner or outer based on encap type */ 4192 MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type); 4193 MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type); 4194 MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type); 4195 MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type); 4196 MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type); 4197 MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type); 4198 MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type); 4199 MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type); 4200 /* always outer */ 4201 MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false); 4202 MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false); 4203 #undef MAP_FILTER_TO_MCDI_FLAG 4204 4205 /* special handling for encap type, and mismatch */ 4206 if (encap_type) { 4207 match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE; 4208 mcdi_flags |= 4209 (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4210 mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4211 4212 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN; 4213 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN; 4214 } else { 4215 uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN; 4216 mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN; 4217 } 4218 4219 if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { 4220 match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; 4221 mcdi_flags |= 4222 is_multicast_ether_addr(spec->loc_mac) ? 4223 1 << mc_match : 4224 1 << uc_match; 4225 } 4226 4227 /* Did we map them all? 
*/ 4228 WARN_ON_ONCE(match_flags); 4229 4230 return mcdi_flags; 4231 } 4232 4233 static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, 4234 const struct efx_filter_spec *spec) 4235 { 4236 u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 4237 unsigned int match_pri; 4238 4239 for (match_pri = 0; 4240 match_pri < table->rx_match_count; 4241 match_pri++) 4242 if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) 4243 return match_pri; 4244 4245 return -EPROTONOSUPPORT; 4246 } 4247 4248 static s32 efx_ef10_filter_insert(struct efx_nic *efx, 4249 struct efx_filter_spec *spec, 4250 bool replace_equal) 4251 { 4252 struct efx_ef10_filter_table *table = efx->filter_state; 4253 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4254 struct efx_filter_spec *saved_spec; 4255 unsigned int match_pri, hash; 4256 unsigned int priv_flags; 4257 bool replacing = false; 4258 int ins_index = -1; 4259 DEFINE_WAIT(wait); 4260 bool is_mc_recip; 4261 s32 rc; 4262 4263 /* For now, only support RX filters */ 4264 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) != 4265 EFX_FILTER_FLAG_RX) 4266 return -EINVAL; 4267 4268 rc = efx_ef10_filter_pri(table, spec); 4269 if (rc < 0) 4270 return rc; 4271 match_pri = rc; 4272 4273 hash = efx_ef10_filter_hash(spec); 4274 is_mc_recip = efx_filter_is_mc_recipient(spec); 4275 if (is_mc_recip) 4276 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4277 4278 /* Find any existing filters with the same match tuple or 4279 * else a free slot to insert at. If any of them are busy, 4280 * we have to wait and retry. 4281 */ 4282 for (;;) { 4283 unsigned int depth = 1; 4284 unsigned int i; 4285 4286 spin_lock_bh(&efx->filter_lock); 4287 4288 for (;;) { 4289 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4290 saved_spec = efx_ef10_filter_entry_spec(table, i); 4291 4292 if (!saved_spec) { 4293 if (ins_index < 0) 4294 ins_index = i; 4295 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4296 if (table->entry[i].spec & 4297 EFX_EF10_FILTER_FLAG_BUSY) 4298 break; 4299 if (spec->priority < saved_spec->priority && 4300 spec->priority != EFX_FILTER_PRI_AUTO) { 4301 rc = -EPERM; 4302 goto out_unlock; 4303 } 4304 if (!is_mc_recip) { 4305 /* This is the only one */ 4306 if (spec->priority == 4307 saved_spec->priority && 4308 !replace_equal) { 4309 rc = -EEXIST; 4310 goto out_unlock; 4311 } 4312 ins_index = i; 4313 goto found; 4314 } else if (spec->priority > 4315 saved_spec->priority || 4316 (spec->priority == 4317 saved_spec->priority && 4318 replace_equal)) { 4319 if (ins_index < 0) 4320 ins_index = i; 4321 else 4322 __set_bit(depth, mc_rem_map); 4323 } 4324 } 4325 4326 /* Once we reach the maximum search depth, use 4327 * the first suitable slot or return -EBUSY if 4328 * there was none 4329 */ 4330 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 4331 if (ins_index < 0) { 4332 rc = -EBUSY; 4333 goto out_unlock; 4334 } 4335 goto found; 4336 } 4337 4338 ++depth; 4339 } 4340 4341 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 4342 spin_unlock_bh(&efx->filter_lock); 4343 schedule(); 4344 } 4345 4346 found: 4347 /* Create a software table entry if necessary, and mark it 4348 * busy. We might yet fail to insert, but any attempt to 4349 * insert a conflicting filter while we're waiting for the 4350 * firmware must find the busy entry. 
4351 */ 4352 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 4353 if (saved_spec) { 4354 if (spec->priority == EFX_FILTER_PRI_AUTO && 4355 saved_spec->priority >= EFX_FILTER_PRI_AUTO) { 4356 /* Just make sure it won't be removed */ 4357 if (saved_spec->priority > EFX_FILTER_PRI_AUTO) 4358 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO; 4359 table->entry[ins_index].spec &= 4360 ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4361 rc = ins_index; 4362 goto out_unlock; 4363 } 4364 replacing = true; 4365 priv_flags = efx_ef10_filter_entry_flags(table, ins_index); 4366 } else { 4367 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 4368 if (!saved_spec) { 4369 rc = -ENOMEM; 4370 goto out_unlock; 4371 } 4372 *saved_spec = *spec; 4373 priv_flags = 0; 4374 } 4375 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 4376 priv_flags | EFX_EF10_FILTER_FLAG_BUSY); 4377 4378 /* Mark lower-priority multicast recipients busy prior to removal */ 4379 if (is_mc_recip) { 4380 unsigned int depth, i; 4381 4382 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4383 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4384 if (test_bit(depth, mc_rem_map)) 4385 table->entry[i].spec |= 4386 EFX_EF10_FILTER_FLAG_BUSY; 4387 } 4388 } 4389 4390 spin_unlock_bh(&efx->filter_lock); 4391 4392 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle, 4393 replacing); 4394 4395 /* Finalise the software table entry */ 4396 spin_lock_bh(&efx->filter_lock); 4397 if (rc == 0) { 4398 if (replacing) { 4399 /* Update the fields that may differ */ 4400 if (saved_spec->priority == EFX_FILTER_PRI_AUTO) 4401 saved_spec->flags |= 4402 EFX_FILTER_FLAG_RX_OVER_AUTO; 4403 saved_spec->priority = spec->priority; 4404 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO; 4405 saved_spec->flags |= spec->flags; 4406 saved_spec->rss_context = spec->rss_context; 4407 saved_spec->dmaq_id = spec->dmaq_id; 4408 } 4409 } else if (!replacing) { 4410 kfree(saved_spec); 4411 saved_spec = NULL; 4412 } 4413 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags); 4414 4415 /* Remove and finalise entries for lower-priority multicast 4416 * recipients 4417 */ 4418 if (is_mc_recip) { 4419 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4420 unsigned int depth, i; 4421 4422 memset(inbuf, 0, sizeof(inbuf)); 4423 4424 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) { 4425 if (!test_bit(depth, mc_rem_map)) 4426 continue; 4427 4428 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4429 saved_spec = efx_ef10_filter_entry_spec(table, i); 4430 priv_flags = efx_ef10_filter_entry_flags(table, i); 4431 4432 if (rc == 0) { 4433 spin_unlock_bh(&efx->filter_lock); 4434 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4435 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4436 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4437 table->entry[i].handle); 4438 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, 4439 inbuf, sizeof(inbuf), 4440 NULL, 0, NULL); 4441 spin_lock_bh(&efx->filter_lock); 4442 } 4443 4444 if (rc == 0) { 4445 kfree(saved_spec); 4446 saved_spec = NULL; 4447 priv_flags = 0; 4448 } else { 4449 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY; 4450 } 4451 efx_ef10_filter_set_entry(table, i, saved_spec, 4452 priv_flags); 4453 } 4454 } 4455 4456 /* If successful, return the inserted filter ID */ 4457 if (rc == 0) 4458 rc = efx_ef10_make_filter_id(match_pri, ins_index); 4459 4460 wake_up_all(&table->waitq); 4461 out_unlock: 4462 spin_unlock_bh(&efx->filter_lock); 4463 finish_wait(&table->waitq, &wait); 4464 return rc; 4465 } 4466 4467 static void 
efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 4468 { 4469 /* no need to do anything here on EF10 */ 4470 } 4471 4472 /* Remove a filter. 4473 * If !by_index, remove by ID 4474 * If by_index, remove by index 4475 * Filter ID may come from userland and must be range-checked. 4476 */ 4477 static int efx_ef10_filter_remove_internal(struct efx_nic *efx, 4478 unsigned int priority_mask, 4479 u32 filter_id, bool by_index) 4480 { 4481 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4482 struct efx_ef10_filter_table *table = efx->filter_state; 4483 MCDI_DECLARE_BUF(inbuf, 4484 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 4485 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 4486 struct efx_filter_spec *spec; 4487 DEFINE_WAIT(wait); 4488 int rc; 4489 4490 /* Find the software table entry and mark it busy. Don't 4491 * remove it yet; any attempt to update while we're waiting 4492 * for the firmware must find the busy entry. 4493 */ 4494 for (;;) { 4495 spin_lock_bh(&efx->filter_lock); 4496 if (!(table->entry[filter_idx].spec & 4497 EFX_EF10_FILTER_FLAG_BUSY)) 4498 break; 4499 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE); 4500 spin_unlock_bh(&efx->filter_lock); 4501 schedule(); 4502 } 4503 4504 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4505 if (!spec || 4506 (!by_index && 4507 efx_ef10_filter_pri(table, spec) != 4508 efx_ef10_filter_get_unsafe_pri(filter_id))) { 4509 rc = -ENOENT; 4510 goto out_unlock; 4511 } 4512 4513 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO && 4514 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) { 4515 /* Just remove flags */ 4516 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO; 4517 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD; 4518 rc = 0; 4519 goto out_unlock; 4520 } 4521 4522 if (!(priority_mask & (1U << spec->priority))) { 4523 rc = -ENOENT; 4524 goto out_unlock; 4525 } 4526 4527 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 4528 spin_unlock_bh(&efx->filter_lock); 4529 4530 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) { 4531 /* Reset to an automatic filter */ 4532 4533 struct efx_filter_spec new_spec = *spec; 4534 4535 new_spec.priority = EFX_FILTER_PRI_AUTO; 4536 new_spec.flags = (EFX_FILTER_FLAG_RX | 4537 (efx_rss_enabled(efx) ? 4538 EFX_FILTER_FLAG_RX_RSS : 0)); 4539 new_spec.dmaq_id = 0; 4540 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; 4541 rc = efx_ef10_filter_push(efx, &new_spec, 4542 &table->entry[filter_idx].handle, 4543 true); 4544 4545 spin_lock_bh(&efx->filter_lock); 4546 if (rc == 0) 4547 *spec = new_spec; 4548 } else { 4549 /* Really remove the filter */ 4550 4551 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4552 efx_ef10_filter_is_exclusive(spec) ? 
4553 MC_CMD_FILTER_OP_IN_OP_REMOVE : 4554 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 4555 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4556 table->entry[filter_idx].handle); 4557 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, 4558 inbuf, sizeof(inbuf), NULL, 0, NULL); 4559 4560 spin_lock_bh(&efx->filter_lock); 4561 if ((rc == 0) || (rc == -ENOENT)) { 4562 /* Filter removed OK or didn't actually exist */ 4563 kfree(spec); 4564 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 4565 } else { 4566 efx_mcdi_display_error(efx, MC_CMD_FILTER_OP, 4567 MC_CMD_FILTER_OP_EXT_IN_LEN, 4568 NULL, 0, rc); 4569 } 4570 } 4571 4572 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 4573 wake_up_all(&table->waitq); 4574 out_unlock: 4575 spin_unlock_bh(&efx->filter_lock); 4576 finish_wait(&table->waitq, &wait); 4577 return rc; 4578 } 4579 4580 static int efx_ef10_filter_remove_safe(struct efx_nic *efx, 4581 enum efx_filter_priority priority, 4582 u32 filter_id) 4583 { 4584 return efx_ef10_filter_remove_internal(efx, 1U << priority, 4585 filter_id, false); 4586 } 4587 4588 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, 4589 enum efx_filter_priority priority, 4590 u32 filter_id) 4591 { 4592 if (filter_id == EFX_EF10_FILTER_ID_INVALID) 4593 return; 4594 efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); 4595 } 4596 4597 static int efx_ef10_filter_get_safe(struct efx_nic *efx, 4598 enum efx_filter_priority priority, 4599 u32 filter_id, struct efx_filter_spec *spec) 4600 { 4601 unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id); 4602 struct efx_ef10_filter_table *table = efx->filter_state; 4603 const struct efx_filter_spec *saved_spec; 4604 int rc; 4605 4606 spin_lock_bh(&efx->filter_lock); 4607 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); 4608 if (saved_spec && saved_spec->priority == priority && 4609 efx_ef10_filter_pri(table, saved_spec) == 4610 efx_ef10_filter_get_unsafe_pri(filter_id)) { 4611 *spec = *saved_spec; 4612 rc = 0; 4613 } else { 4614 rc = -ENOENT; 4615 } 4616 spin_unlock_bh(&efx->filter_lock); 4617 return rc; 4618 } 4619 4620 static int efx_ef10_filter_clear_rx(struct efx_nic *efx, 4621 enum efx_filter_priority priority) 4622 { 4623 unsigned int priority_mask; 4624 unsigned int i; 4625 int rc; 4626 4627 priority_mask = (((1U << (priority + 1)) - 1) & 4628 ~(1U << EFX_FILTER_PRI_AUTO)); 4629 4630 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 4631 rc = efx_ef10_filter_remove_internal(efx, priority_mask, 4632 i, true); 4633 if (rc && rc != -ENOENT) 4634 return rc; 4635 } 4636 4637 return 0; 4638 } 4639 4640 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx, 4641 enum efx_filter_priority priority) 4642 { 4643 struct efx_ef10_filter_table *table = efx->filter_state; 4644 unsigned int filter_idx; 4645 s32 count = 0; 4646 4647 spin_lock_bh(&efx->filter_lock); 4648 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4649 if (table->entry[filter_idx].spec && 4650 efx_ef10_filter_entry_spec(table, filter_idx)->priority == 4651 priority) 4652 ++count; 4653 } 4654 spin_unlock_bh(&efx->filter_lock); 4655 return count; 4656 } 4657 4658 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx) 4659 { 4660 struct efx_ef10_filter_table *table = efx->filter_state; 4661 4662 return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2; 4663 } 4664 4665 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, 4666 enum efx_filter_priority priority, 4667 u32 *buf, u32 size) 4668 { 4669 struct efx_ef10_filter_table 
*table = efx->filter_state; 4670 struct efx_filter_spec *spec; 4671 unsigned int filter_idx; 4672 s32 count = 0; 4673 4674 spin_lock_bh(&efx->filter_lock); 4675 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 4676 spec = efx_ef10_filter_entry_spec(table, filter_idx); 4677 if (spec && spec->priority == priority) { 4678 if (count == size) { 4679 count = -EMSGSIZE; 4680 break; 4681 } 4682 buf[count++] = 4683 efx_ef10_make_filter_id( 4684 efx_ef10_filter_pri(table, spec), 4685 filter_idx); 4686 } 4687 } 4688 spin_unlock_bh(&efx->filter_lock); 4689 return count; 4690 } 4691 4692 #ifdef CONFIG_RFS_ACCEL 4693 4694 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete; 4695 4696 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx, 4697 struct efx_filter_spec *spec) 4698 { 4699 struct efx_ef10_filter_table *table = efx->filter_state; 4700 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 4701 struct efx_filter_spec *saved_spec; 4702 unsigned int hash, i, depth = 1; 4703 bool replacing = false; 4704 int ins_index = -1; 4705 u64 cookie; 4706 s32 rc; 4707 4708 /* Must be an RX filter without RSS and not for a multicast 4709 * destination address (RFS only works for connected sockets). 4710 * These restrictions allow us to pass only a tiny amount of 4711 * data through to the completion function. 4712 */ 4713 EFX_WARN_ON_PARANOID(spec->flags != 4714 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER)); 4715 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT); 4716 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec)); 4717 4718 hash = efx_ef10_filter_hash(spec); 4719 4720 spin_lock_bh(&efx->filter_lock); 4721 4722 /* Find any existing filter with the same match tuple or else 4723 * a free slot to insert at. If an existing filter is busy, 4724 * we have to give up. 4725 */ 4726 for (;;) { 4727 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1); 4728 saved_spec = efx_ef10_filter_entry_spec(table, i); 4729 4730 if (!saved_spec) { 4731 if (ins_index < 0) 4732 ins_index = i; 4733 } else if (efx_ef10_filter_equal(spec, saved_spec)) { 4734 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) { 4735 rc = -EBUSY; 4736 goto fail_unlock; 4737 } 4738 if (spec->priority < saved_spec->priority) { 4739 rc = -EPERM; 4740 goto fail_unlock; 4741 } 4742 ins_index = i; 4743 break; 4744 } 4745 4746 /* Once we reach the maximum search depth, use the 4747 * first suitable slot or return -EBUSY if there was 4748 * none 4749 */ 4750 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) { 4751 if (ins_index < 0) { 4752 rc = -EBUSY; 4753 goto fail_unlock; 4754 } 4755 break; 4756 } 4757 4758 ++depth; 4759 } 4760 4761 /* Create a software table entry if necessary, and mark it 4762 * busy. We might yet fail to insert, but any attempt to 4763 * insert a conflicting filter while we're waiting for the 4764 * firmware must find the busy entry. 
4765 */ 4766 saved_spec = efx_ef10_filter_entry_spec(table, ins_index); 4767 if (saved_spec) { 4768 replacing = true; 4769 } else { 4770 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC); 4771 if (!saved_spec) { 4772 rc = -ENOMEM; 4773 goto fail_unlock; 4774 } 4775 *saved_spec = *spec; 4776 } 4777 efx_ef10_filter_set_entry(table, ins_index, saved_spec, 4778 EFX_EF10_FILTER_FLAG_BUSY); 4779 4780 spin_unlock_bh(&efx->filter_lock); 4781 4782 /* Pack up the variables needed on completion */ 4783 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id; 4784 4785 efx_ef10_filter_push_prep(efx, spec, inbuf, 4786 table->entry[ins_index].handle, replacing); 4787 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 4788 MC_CMD_FILTER_OP_OUT_LEN, 4789 efx_ef10_filter_rfs_insert_complete, cookie); 4790 4791 return ins_index; 4792 4793 fail_unlock: 4794 spin_unlock_bh(&efx->filter_lock); 4795 return rc; 4796 } 4797 4798 static void 4799 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie, 4800 int rc, efx_dword_t *outbuf, 4801 size_t outlen_actual) 4802 { 4803 struct efx_ef10_filter_table *table = efx->filter_state; 4804 unsigned int ins_index, dmaq_id; 4805 struct efx_filter_spec *spec; 4806 bool replacing; 4807 4808 /* Unpack the cookie */ 4809 replacing = cookie >> 31; 4810 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1); 4811 dmaq_id = cookie & 0xffff; 4812 4813 spin_lock_bh(&efx->filter_lock); 4814 spec = efx_ef10_filter_entry_spec(table, ins_index); 4815 if (rc == 0) { 4816 table->entry[ins_index].handle = 4817 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE); 4818 if (replacing) 4819 spec->dmaq_id = dmaq_id; 4820 } else if (!replacing) { 4821 kfree(spec); 4822 spec = NULL; 4823 } 4824 efx_ef10_filter_set_entry(table, ins_index, spec, 0); 4825 spin_unlock_bh(&efx->filter_lock); 4826 4827 wake_up_all(&table->waitq); 4828 } 4829 4830 static void 4831 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 4832 unsigned long filter_idx, 4833 int rc, efx_dword_t *outbuf, 4834 size_t outlen_actual); 4835 4836 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, 4837 unsigned int filter_idx) 4838 { 4839 struct efx_ef10_filter_table *table = efx->filter_state; 4840 struct efx_filter_spec *spec = 4841 efx_ef10_filter_entry_spec(table, filter_idx); 4842 MCDI_DECLARE_BUF(inbuf, 4843 MC_CMD_FILTER_OP_IN_HANDLE_OFST + 4844 MC_CMD_FILTER_OP_IN_HANDLE_LEN); 4845 4846 if (!spec || 4847 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) || 4848 spec->priority != EFX_FILTER_PRI_HINT || 4849 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id, 4850 flow_id, filter_idx)) 4851 return false; 4852 4853 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 4854 MC_CMD_FILTER_OP_IN_OP_REMOVE); 4855 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 4856 table->entry[filter_idx].handle); 4857 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0, 4858 efx_ef10_filter_rfs_expire_complete, filter_idx)) 4859 return false; 4860 4861 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 4862 return true; 4863 } 4864 4865 static void 4866 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx, 4867 unsigned long filter_idx, 4868 int rc, efx_dword_t *outbuf, 4869 size_t outlen_actual) 4870 { 4871 struct efx_ef10_filter_table *table = efx->filter_state; 4872 struct efx_filter_spec *spec = 4873 efx_ef10_filter_entry_spec(table, filter_idx); 4874 4875 spin_lock_bh(&efx->filter_lock); 4876 if (rc == 0) { 4877 kfree(spec); 4878 efx_ef10_filter_set_entry(table, 
filter_idx, NULL, 0); 4879 } 4880 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY; 4881 wake_up_all(&table->waitq); 4882 spin_unlock_bh(&efx->filter_lock); 4883 } 4884 4885 #endif /* CONFIG_RFS_ACCEL */ 4886 4887 static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags) 4888 { 4889 int match_flags = 0; 4890 4891 #define MAP_FLAG(gen_flag, mcdi_field) do { \ 4892 u32 old_mcdi_flags = mcdi_flags; \ 4893 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \ 4894 mcdi_field ## _LBN); \ 4895 if (mcdi_flags != old_mcdi_flags) \ 4896 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \ 4897 } while (0) 4898 4899 if (encap) { 4900 /* encap filters must specify encap type */ 4901 match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE; 4902 /* and imply ethertype and ip proto */ 4903 mcdi_flags &= 4904 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN); 4905 mcdi_flags &= 4906 ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN); 4907 /* VLAN tags refer to the outer packet */ 4908 MAP_FLAG(INNER_VID, INNER_VLAN); 4909 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4910 /* everything else refers to the inner packet */ 4911 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST); 4912 MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST); 4913 MAP_FLAG(REM_HOST, IFRM_SRC_IP); 4914 MAP_FLAG(LOC_HOST, IFRM_DST_IP); 4915 MAP_FLAG(REM_MAC, IFRM_SRC_MAC); 4916 MAP_FLAG(REM_PORT, IFRM_SRC_PORT); 4917 MAP_FLAG(LOC_MAC, IFRM_DST_MAC); 4918 MAP_FLAG(LOC_PORT, IFRM_DST_PORT); 4919 MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE); 4920 MAP_FLAG(IP_PROTO, IFRM_IP_PROTO); 4921 } else { 4922 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST); 4923 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST); 4924 MAP_FLAG(REM_HOST, SRC_IP); 4925 MAP_FLAG(LOC_HOST, DST_IP); 4926 MAP_FLAG(REM_MAC, SRC_MAC); 4927 MAP_FLAG(REM_PORT, SRC_PORT); 4928 MAP_FLAG(LOC_MAC, DST_MAC); 4929 MAP_FLAG(LOC_PORT, DST_PORT); 4930 MAP_FLAG(ETHER_TYPE, ETHER_TYPE); 4931 MAP_FLAG(INNER_VID, INNER_VLAN); 4932 MAP_FLAG(OUTER_VID, OUTER_VLAN); 4933 MAP_FLAG(IP_PROTO, IP_PROTO); 4934 } 4935 #undef MAP_FLAG 4936 4937 /* Did we map them all? 
*/ 4938 if (mcdi_flags) 4939 return -EINVAL; 4940 4941 return match_flags; 4942 } 4943 4944 static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) 4945 { 4946 struct efx_ef10_filter_table *table = efx->filter_state; 4947 struct efx_ef10_filter_vlan *vlan, *next_vlan; 4948 4949 /* See comment in efx_ef10_filter_table_remove() */ 4950 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 4951 return; 4952 4953 if (!table) 4954 return; 4955 4956 list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) 4957 efx_ef10_filter_del_vlan_internal(efx, vlan); 4958 } 4959 4960 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, 4961 bool encap, 4962 enum efx_filter_match_flags match_flags) 4963 { 4964 unsigned int match_pri; 4965 int mf; 4966 4967 for (match_pri = 0; 4968 match_pri < table->rx_match_count; 4969 match_pri++) { 4970 mf = efx_ef10_filter_match_flags_from_mcdi(encap, 4971 table->rx_match_mcdi_flags[match_pri]); 4972 if (mf == match_flags) 4973 return true; 4974 } 4975 4976 return false; 4977 } 4978 4979 static int 4980 efx_ef10_filter_table_probe_matches(struct efx_nic *efx, 4981 struct efx_ef10_filter_table *table, 4982 bool encap) 4983 { 4984 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); 4985 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); 4986 unsigned int pd_match_pri, pd_match_count; 4987 size_t outlen; 4988 int rc; 4989 4990 /* Find out which RX filter types are supported, and their priorities */ 4991 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP, 4992 encap ? 4993 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES : 4994 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES); 4995 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, 4996 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf), 4997 &outlen); 4998 if (rc) 4999 return rc; 5000 5001 pd_match_count = MCDI_VAR_ARRAY_LEN( 5002 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES); 5003 5004 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) { 5005 u32 mcdi_flags = 5006 MCDI_ARRAY_DWORD( 5007 outbuf, 5008 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES, 5009 pd_match_pri); 5010 rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags); 5011 if (rc < 0) { 5012 netif_dbg(efx, probe, efx->net_dev, 5013 "%s: fw flags %#x pri %u not supported in driver\n", 5014 __func__, mcdi_flags, pd_match_pri); 5015 } else { 5016 netif_dbg(efx, probe, efx->net_dev, 5017 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", 5018 __func__, mcdi_flags, pd_match_pri, 5019 rc, table->rx_match_count); 5020 table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; 5021 table->rx_match_count++; 5022 } 5023 } 5024 5025 return 0; 5026 } 5027 5028 static int efx_ef10_filter_table_probe(struct efx_nic *efx) 5029 { 5030 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5031 struct net_device *net_dev = efx->net_dev; 5032 struct efx_ef10_filter_table *table; 5033 struct efx_ef10_vlan *vlan; 5034 int rc; 5035 5036 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5037 return -EINVAL; 5038 5039 if (efx->filter_state) /* already probed */ 5040 return 0; 5041 5042 table = kzalloc(sizeof(*table), GFP_KERNEL); 5043 if (!table) 5044 return -ENOMEM; 5045 5046 table->rx_match_count = 0; 5047 rc = efx_ef10_filter_table_probe_matches(efx, table, false); 5048 if (rc) 5049 goto fail; 5050 if (nic_data->datapath_caps & 5051 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 5052 rc = efx_ef10_filter_table_probe_matches(efx, table, true); 
5053 if (rc) 5054 goto fail; 5055 if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && 5056 !(efx_ef10_filter_match_supported(table, false, 5057 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && 5058 efx_ef10_filter_match_supported(table, false, 5059 (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { 5060 netif_info(efx, probe, net_dev, 5061 "VLAN filters are not supported in this firmware variant\n"); 5062 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 5063 efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 5064 net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 5065 } 5066 5067 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); 5068 if (!table->entry) { 5069 rc = -ENOMEM; 5070 goto fail; 5071 } 5072 5073 table->mc_promisc_last = false; 5074 table->vlan_filter = 5075 !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 5076 INIT_LIST_HEAD(&table->vlan_list); 5077 5078 efx->filter_state = table; 5079 init_waitqueue_head(&table->waitq); 5080 5081 list_for_each_entry(vlan, &nic_data->vlan_list, list) { 5082 rc = efx_ef10_filter_add_vlan(efx, vlan->vid); 5083 if (rc) 5084 goto fail_add_vlan; 5085 } 5086 5087 return 0; 5088 5089 fail_add_vlan: 5090 efx_ef10_filter_cleanup_vlans(efx); 5091 efx->filter_state = NULL; 5092 fail: 5093 kfree(table); 5094 return rc; 5095 } 5096 5097 /* Caller must hold efx->filter_sem for read if race against 5098 * efx_ef10_filter_table_remove() is possible 5099 */ 5100 static void efx_ef10_filter_table_restore(struct efx_nic *efx) 5101 { 5102 struct efx_ef10_filter_table *table = efx->filter_state; 5103 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5104 unsigned int invalid_filters = 0, failed = 0; 5105 struct efx_ef10_filter_vlan *vlan; 5106 struct efx_filter_spec *spec; 5107 unsigned int filter_idx; 5108 u32 mcdi_flags; 5109 int match_pri; 5110 int rc, i; 5111 5112 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5113 5114 if (!nic_data->must_restore_filters) 5115 return; 5116 5117 if (!table) 5118 return; 5119 5120 spin_lock_bh(&efx->filter_lock); 5121 5122 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5123 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5124 if (!spec) 5125 continue; 5126 5127 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); 5128 match_pri = 0; 5129 while (match_pri < table->rx_match_count && 5130 table->rx_match_mcdi_flags[match_pri] != mcdi_flags) 5131 ++match_pri; 5132 if (match_pri >= table->rx_match_count) { 5133 invalid_filters++; 5134 goto not_restored; 5135 } 5136 if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT && 5137 spec->rss_context != nic_data->rx_rss_context) 5138 netif_warn(efx, drv, efx->net_dev, 5139 "Warning: unable to restore a filter with specific RSS context.\n"); 5140 5141 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY; 5142 spin_unlock_bh(&efx->filter_lock); 5143 5144 rc = efx_ef10_filter_push(efx, spec, 5145 &table->entry[filter_idx].handle, 5146 false); 5147 if (rc) 5148 failed++; 5149 spin_lock_bh(&efx->filter_lock); 5150 5151 if (rc) { 5152 not_restored: 5153 list_for_each_entry(vlan, &table->vlan_list, list) 5154 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i) 5155 if (vlan->default_filters[i] == filter_idx) 5156 vlan->default_filters[i] = 5157 EFX_EF10_FILTER_ID_INVALID; 5158 5159 kfree(spec); 5160 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0); 5161 } else { 5162 table->entry[filter_idx].spec &= 5163 ~EFX_EF10_FILTER_FLAG_BUSY; 5164 } 5165 } 5166 5167 
spin_unlock_bh(&efx->filter_lock); 5168 5169 /* This can happen validly if the MC's capabilities have changed, so 5170 * is not an error. 5171 */ 5172 if (invalid_filters) 5173 netif_dbg(efx, drv, efx->net_dev, 5174 "Did not restore %u filters that are now unsupported.\n", 5175 invalid_filters); 5176 5177 if (failed) 5178 netif_err(efx, hw, efx->net_dev, 5179 "unable to restore %u filters\n", failed); 5180 else 5181 nic_data->must_restore_filters = false; 5182 } 5183 5184 static void efx_ef10_filter_table_remove(struct efx_nic *efx) 5185 { 5186 struct efx_ef10_filter_table *table = efx->filter_state; 5187 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); 5188 struct efx_filter_spec *spec; 5189 unsigned int filter_idx; 5190 int rc; 5191 5192 efx_ef10_filter_cleanup_vlans(efx); 5193 efx->filter_state = NULL; 5194 /* If we were called without locking, then it's not safe to free 5195 * the table as others might be using it. So we just WARN, leak 5196 * the memory, and potentially get an inconsistent filter table 5197 * state. 5198 * This should never actually happen. 5199 */ 5200 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5201 return; 5202 5203 if (!table) 5204 return; 5205 5206 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) { 5207 spec = efx_ef10_filter_entry_spec(table, filter_idx); 5208 if (!spec) 5209 continue; 5210 5211 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, 5212 efx_ef10_filter_is_exclusive(spec) ? 5213 MC_CMD_FILTER_OP_IN_OP_REMOVE : 5214 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE); 5215 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, 5216 table->entry[filter_idx].handle); 5217 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP, inbuf, 5218 sizeof(inbuf), NULL, 0, NULL); 5219 if (rc) 5220 netif_info(efx, drv, efx->net_dev, 5221 "%s: filter %04x remove failed\n", 5222 __func__, filter_idx); 5223 kfree(spec); 5224 } 5225 5226 vfree(table->entry); 5227 kfree(table); 5228 } 5229 5230 static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) 5231 { 5232 struct efx_ef10_filter_table *table = efx->filter_state; 5233 unsigned int filter_idx; 5234 5235 if (*id != EFX_EF10_FILTER_ID_INVALID) { 5236 filter_idx = efx_ef10_filter_get_unsafe_id(*id); 5237 if (!table->entry[filter_idx].spec) 5238 netif_dbg(efx, drv, efx->net_dev, 5239 "marked null spec old %04x:%04x\n", *id, 5240 filter_idx); 5241 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; 5242 *id = EFX_EF10_FILTER_ID_INVALID; 5243 } 5244 } 5245 5246 /* Mark old per-VLAN filters that may need to be removed */ 5247 static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, 5248 struct efx_ef10_filter_vlan *vlan) 5249 { 5250 struct efx_ef10_filter_table *table = efx->filter_state; 5251 unsigned int i; 5252 5253 for (i = 0; i < table->dev_uc_count; i++) 5254 efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); 5255 for (i = 0; i < table->dev_mc_count; i++) 5256 efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); 5257 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5258 efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]); 5259 } 5260 5261 /* Mark old filters that may need to be removed. 
5262 * Caller must hold efx->filter_sem for read if race against 5263 * efx_ef10_filter_table_remove() is possible 5264 */ 5265 static void efx_ef10_filter_mark_old(struct efx_nic *efx) 5266 { 5267 struct efx_ef10_filter_table *table = efx->filter_state; 5268 struct efx_ef10_filter_vlan *vlan; 5269 5270 spin_lock_bh(&efx->filter_lock); 5271 list_for_each_entry(vlan, &table->vlan_list, list) 5272 _efx_ef10_filter_vlan_mark_old(efx, vlan); 5273 spin_unlock_bh(&efx->filter_lock); 5274 } 5275 5276 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) 5277 { 5278 struct efx_ef10_filter_table *table = efx->filter_state; 5279 struct net_device *net_dev = efx->net_dev; 5280 struct netdev_hw_addr *uc; 5281 unsigned int i; 5282 5283 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); 5284 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 5285 i = 1; 5286 netdev_for_each_uc_addr(uc, net_dev) { 5287 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { 5288 table->uc_promisc = true; 5289 break; 5290 } 5291 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 5292 i++; 5293 } 5294 5295 table->dev_uc_count = i; 5296 } 5297 5298 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) 5299 { 5300 struct efx_ef10_filter_table *table = efx->filter_state; 5301 struct net_device *net_dev = efx->net_dev; 5302 struct netdev_hw_addr *mc; 5303 unsigned int i; 5304 5305 table->mc_overflow = false; 5306 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); 5307 5308 i = 0; 5309 netdev_for_each_mc_addr(mc, net_dev) { 5310 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { 5311 table->mc_promisc = true; 5312 table->mc_overflow = true; 5313 break; 5314 } 5315 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); 5316 i++; 5317 } 5318 5319 table->dev_mc_count = i; 5320 } 5321 5322 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, 5323 struct efx_ef10_filter_vlan *vlan, 5324 bool multicast, bool rollback) 5325 { 5326 struct efx_ef10_filter_table *table = efx->filter_state; 5327 struct efx_ef10_dev_addr *addr_list; 5328 enum efx_filter_flags filter_flags; 5329 struct efx_filter_spec spec; 5330 u8 baddr[ETH_ALEN]; 5331 unsigned int i, j; 5332 int addr_count; 5333 u16 *ids; 5334 int rc; 5335 5336 if (multicast) { 5337 addr_list = table->dev_mc_list; 5338 addr_count = table->dev_mc_count; 5339 ids = vlan->mc; 5340 } else { 5341 addr_list = table->dev_uc_list; 5342 addr_count = table->dev_uc_count; 5343 ids = vlan->uc; 5344 } 5345 5346 filter_flags = efx_rss_enabled(efx) ? 
EFX_FILTER_FLAG_RX_RSS : 0; 5347 5348 /* Insert/renew filters */ 5349 for (i = 0; i < addr_count; i++) { 5350 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5351 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5352 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5353 rc = efx_ef10_filter_insert(efx, &spec, true); 5354 if (rc < 0) { 5355 if (rollback) { 5356 netif_info(efx, drv, efx->net_dev, 5357 "efx_ef10_filter_insert failed rc=%d\n", 5358 rc); 5359 /* Fall back to promiscuous */ 5360 for (j = 0; j < i; j++) { 5361 efx_ef10_filter_remove_unsafe( 5362 efx, EFX_FILTER_PRI_AUTO, 5363 ids[j]); 5364 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5365 } 5366 return rc; 5367 } else { 5368 /* keep invalid ID, and carry on */ 5369 } 5370 } else { 5371 ids[i] = efx_ef10_filter_get_unsafe_id(rc); 5372 } 5373 } 5374 5375 if (multicast && rollback) { 5376 /* Also need an Ethernet broadcast filter */ 5377 EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] != 5378 EFX_EF10_FILTER_ID_INVALID); 5379 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5380 eth_broadcast_addr(baddr); 5381 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5382 rc = efx_ef10_filter_insert(efx, &spec, true); 5383 if (rc < 0) { 5384 netif_warn(efx, drv, efx->net_dev, 5385 "Broadcast filter insert failed rc=%d\n", rc); 5386 /* Fall back to promiscuous */ 5387 for (j = 0; j < i; j++) { 5388 efx_ef10_filter_remove_unsafe( 5389 efx, EFX_FILTER_PRI_AUTO, 5390 ids[j]); 5391 ids[j] = EFX_EF10_FILTER_ID_INVALID; 5392 } 5393 return rc; 5394 } else { 5395 vlan->default_filters[EFX_EF10_BCAST] = 5396 efx_ef10_filter_get_unsafe_id(rc); 5397 } 5398 } 5399 5400 return 0; 5401 } 5402 5403 static int efx_ef10_filter_insert_def(struct efx_nic *efx, 5404 struct efx_ef10_filter_vlan *vlan, 5405 enum efx_encap_type encap_type, 5406 bool multicast, bool rollback) 5407 { 5408 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5409 enum efx_filter_flags filter_flags; 5410 struct efx_filter_spec spec; 5411 u8 baddr[ETH_ALEN]; 5412 int rc; 5413 u16 *id; 5414 5415 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; 5416 5417 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5418 5419 if (multicast) 5420 efx_filter_set_mc_def(&spec); 5421 else 5422 efx_filter_set_uc_def(&spec); 5423 5424 if (encap_type) { 5425 if (nic_data->datapath_caps & 5426 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)) 5427 efx_filter_set_encap_type(&spec, encap_type); 5428 else 5429 /* don't insert encap filters on non-supporting 5430 * platforms. ID will be left as INVALID. 5431 */ 5432 return 0; 5433 } 5434 5435 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 5436 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 5437 5438 rc = efx_ef10_filter_insert(efx, &spec, true); 5439 if (rc < 0) { 5440 const char *um = multicast ? 
"Multicast" : "Unicast"; 5441 const char *encap_name = ""; 5442 const char *encap_ipv = ""; 5443 5444 if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5445 EFX_ENCAP_TYPE_VXLAN) 5446 encap_name = "VXLAN "; 5447 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5448 EFX_ENCAP_TYPE_NVGRE) 5449 encap_name = "NVGRE "; 5450 else if ((encap_type & EFX_ENCAP_TYPES_MASK) == 5451 EFX_ENCAP_TYPE_GENEVE) 5452 encap_name = "GENEVE "; 5453 if (encap_type & EFX_ENCAP_FLAG_IPV6) 5454 encap_ipv = "IPv6 "; 5455 else if (encap_type) 5456 encap_ipv = "IPv4 "; 5457 5458 /* unprivileged functions can't insert mismatch filters 5459 * for encapsulated or unicast traffic, so downgrade 5460 * those warnings to debug. 5461 */ 5462 netif_cond_dbg(efx, drv, efx->net_dev, 5463 rc == -EPERM && (encap_type || !multicast), warn, 5464 "%s%s%s mismatch filter insert failed rc=%d\n", 5465 encap_name, encap_ipv, um, rc); 5466 } else if (multicast) { 5467 /* mapping from encap types to default filter IDs (multicast) */ 5468 static enum efx_ef10_default_filters map[] = { 5469 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF, 5470 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF, 5471 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF, 5472 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF, 5473 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5474 EFX_EF10_VXLAN6_MCDEF, 5475 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5476 EFX_EF10_NVGRE6_MCDEF, 5477 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5478 EFX_EF10_GENEVE6_MCDEF, 5479 }; 5480 5481 /* quick bounds check (BCAST result impossible) */ 5482 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5483 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5484 WARN_ON(1); 5485 return -EINVAL; 5486 } 5487 /* then follow map */ 5488 id = &vlan->default_filters[map[encap_type]]; 5489 5490 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5491 *id = efx_ef10_filter_get_unsafe_id(rc); 5492 if (!nic_data->workaround_26807 && !encap_type) { 5493 /* Also need an Ethernet broadcast filter */ 5494 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, 5495 filter_flags, 0); 5496 eth_broadcast_addr(baddr); 5497 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5498 rc = efx_ef10_filter_insert(efx, &spec, true); 5499 if (rc < 0) { 5500 netif_warn(efx, drv, efx->net_dev, 5501 "Broadcast filter insert failed rc=%d\n", 5502 rc); 5503 if (rollback) { 5504 /* Roll back the mc_def filter */ 5505 efx_ef10_filter_remove_unsafe( 5506 efx, EFX_FILTER_PRI_AUTO, 5507 *id); 5508 *id = EFX_EF10_FILTER_ID_INVALID; 5509 return rc; 5510 } 5511 } else { 5512 EFX_WARN_ON_PARANOID( 5513 vlan->default_filters[EFX_EF10_BCAST] != 5514 EFX_EF10_FILTER_ID_INVALID); 5515 vlan->default_filters[EFX_EF10_BCAST] = 5516 efx_ef10_filter_get_unsafe_id(rc); 5517 } 5518 } 5519 rc = 0; 5520 } else { 5521 /* mapping from encap types to default filter IDs (unicast) */ 5522 static enum efx_ef10_default_filters map[] = { 5523 [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF, 5524 [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF, 5525 [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF, 5526 [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF, 5527 [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] = 5528 EFX_EF10_VXLAN6_UCDEF, 5529 [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] = 5530 EFX_EF10_NVGRE6_UCDEF, 5531 [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] = 5532 EFX_EF10_GENEVE6_UCDEF, 5533 }; 5534 5535 /* quick bounds check (BCAST result impossible) */ 5536 BUILD_BUG_ON(EFX_EF10_BCAST != 0); 5537 if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { 5538 WARN_ON(1); 5539 
return -EINVAL; 5540 } 5541 /* then follow map */ 5542 id = &vlan->default_filters[map[encap_type]]; 5543 EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID); 5544 *id = rc; 5545 rc = 0; 5546 } 5547 return rc; 5548 } 5549 5550 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD 5551 * flag or removes these filters, we don't need to hold the filter_lock while 5552 * scanning for these filters. 5553 */ 5554 static void efx_ef10_filter_remove_old(struct efx_nic *efx) 5555 { 5556 struct efx_ef10_filter_table *table = efx->filter_state; 5557 int remove_failed = 0; 5558 int remove_noent = 0; 5559 int rc; 5560 int i; 5561 5562 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { 5563 if (READ_ONCE(table->entry[i].spec) & 5564 EFX_EF10_FILTER_FLAG_AUTO_OLD) { 5565 rc = efx_ef10_filter_remove_internal(efx, 5566 1U << EFX_FILTER_PRI_AUTO, i, true); 5567 if (rc == -ENOENT) 5568 remove_noent++; 5569 else if (rc) 5570 remove_failed++; 5571 } 5572 } 5573 5574 if (remove_failed) 5575 netif_info(efx, drv, efx->net_dev, 5576 "%s: failed to remove %d filters\n", 5577 __func__, remove_failed); 5578 if (remove_noent) 5579 netif_info(efx, drv, efx->net_dev, 5580 "%s: failed to remove %d non-existent filters\n", 5581 __func__, remove_noent); 5582 } 5583 5584 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx) 5585 { 5586 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5587 u8 mac_old[ETH_ALEN]; 5588 int rc, rc2; 5589 5590 /* Only reconfigure a PF-created vport */ 5591 if (is_zero_ether_addr(nic_data->vport_mac)) 5592 return 0; 5593 5594 efx_device_detach_sync(efx); 5595 efx_net_stop(efx->net_dev); 5596 down_write(&efx->filter_sem); 5597 efx_ef10_filter_table_remove(efx); 5598 up_write(&efx->filter_sem); 5599 5600 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id); 5601 if (rc) 5602 goto restore_filters; 5603 5604 ether_addr_copy(mac_old, nic_data->vport_mac); 5605 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id, 5606 nic_data->vport_mac); 5607 if (rc) 5608 goto restore_vadaptor; 5609 5610 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id, 5611 efx->net_dev->dev_addr); 5612 if (!rc) { 5613 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr); 5614 } else { 5615 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old); 5616 if (rc2) { 5617 /* Failed to add original MAC, so clear vport_mac */ 5618 eth_zero_addr(nic_data->vport_mac); 5619 goto reset_nic; 5620 } 5621 } 5622 5623 restore_vadaptor: 5624 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); 5625 if (rc2) 5626 goto reset_nic; 5627 restore_filters: 5628 down_write(&efx->filter_sem); 5629 rc2 = efx_ef10_filter_table_probe(efx); 5630 up_write(&efx->filter_sem); 5631 if (rc2) 5632 goto reset_nic; 5633 5634 rc2 = efx_net_open(efx->net_dev); 5635 if (rc2) 5636 goto reset_nic; 5637 5638 efx_device_attach_if_not_resetting(efx); 5639 5640 return rc; 5641 5642 reset_nic: 5643 netif_err(efx, drv, efx->net_dev, 5644 "Failed to restore when changing MAC address - scheduling reset\n"); 5645 efx_schedule_reset(efx, RESET_TYPE_DATAPATH); 5646 5647 return rc ? 
rc : rc2; 5648 } 5649 5650 /* Caller must hold efx->filter_sem for read if race against 5651 * efx_ef10_filter_table_remove() is possible 5652 */ 5653 static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, 5654 struct efx_ef10_filter_vlan *vlan) 5655 { 5656 struct efx_ef10_filter_table *table = efx->filter_state; 5657 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5658 5659 /* Do not install unspecified VID if VLAN filtering is enabled. 5660 * Do not install all specified VIDs if VLAN filtering is disabled. 5661 */ 5662 if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) 5663 return; 5664 5665 /* Insert/renew unicast filters */ 5666 if (table->uc_promisc) { 5667 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE, 5668 false, false); 5669 efx_ef10_filter_insert_addr_list(efx, vlan, false, false); 5670 } else { 5671 /* If any of the filters failed to insert, fall back to 5672 * promiscuous mode - add in the uc_def filter. But keep 5673 * our individual unicast filters. 5674 */ 5675 if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) 5676 efx_ef10_filter_insert_def(efx, vlan, 5677 EFX_ENCAP_TYPE_NONE, 5678 false, false); 5679 } 5680 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5681 false, false); 5682 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5683 EFX_ENCAP_FLAG_IPV6, 5684 false, false); 5685 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5686 false, false); 5687 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5688 EFX_ENCAP_FLAG_IPV6, 5689 false, false); 5690 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5691 false, false); 5692 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5693 EFX_ENCAP_FLAG_IPV6, 5694 false, false); 5695 5696 /* Insert/renew multicast filters */ 5697 /* If changing promiscuous state with cascaded multicast filters, remove 5698 * old filters first, so that packets are dropped rather than duplicated 5699 */ 5700 if (nic_data->workaround_26807 && 5701 table->mc_promisc_last != table->mc_promisc) 5702 efx_ef10_filter_remove_old(efx); 5703 if (table->mc_promisc) { 5704 if (nic_data->workaround_26807) { 5705 /* If we failed to insert promiscuous filters, rollback 5706 * and fall back to individual multicast filters 5707 */ 5708 if (efx_ef10_filter_insert_def(efx, vlan, 5709 EFX_ENCAP_TYPE_NONE, 5710 true, true)) { 5711 /* Changing promisc state, so remove old filters */ 5712 efx_ef10_filter_remove_old(efx); 5713 efx_ef10_filter_insert_addr_list(efx, vlan, 5714 true, false); 5715 } 5716 } else { 5717 /* If we failed to insert promiscuous filters, don't 5718 * rollback. Regardless, also insert the mc_list, 5719 * unless it's incomplete due to overflow 5720 */ 5721 efx_ef10_filter_insert_def(efx, vlan, 5722 EFX_ENCAP_TYPE_NONE, 5723 true, false); 5724 if (!table->mc_overflow) 5725 efx_ef10_filter_insert_addr_list(efx, vlan, 5726 true, false); 5727 } 5728 } else { 5729 /* If any filters failed to insert, rollback and fall back to 5730 * promiscuous mode - mc_def filter and maybe broadcast. If 5731 * that fails, roll back again and insert as many of our 5732 * individual multicast filters as we can. 
5733 */ 5734 if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { 5735 /* Changing promisc state, so remove old filters */ 5736 if (nic_data->workaround_26807) 5737 efx_ef10_filter_remove_old(efx); 5738 if (efx_ef10_filter_insert_def(efx, vlan, 5739 EFX_ENCAP_TYPE_NONE, 5740 true, true)) 5741 efx_ef10_filter_insert_addr_list(efx, vlan, 5742 true, false); 5743 } 5744 } 5745 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN, 5746 true, false); 5747 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN | 5748 EFX_ENCAP_FLAG_IPV6, 5749 true, false); 5750 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE, 5751 true, false); 5752 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE | 5753 EFX_ENCAP_FLAG_IPV6, 5754 true, false); 5755 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE, 5756 true, false); 5757 efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE | 5758 EFX_ENCAP_FLAG_IPV6, 5759 true, false); 5760 } 5761 5762 /* Caller must hold efx->filter_sem for read if race against 5763 * efx_ef10_filter_table_remove() is possible 5764 */ 5765 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) 5766 { 5767 struct efx_ef10_filter_table *table = efx->filter_state; 5768 struct net_device *net_dev = efx->net_dev; 5769 struct efx_ef10_filter_vlan *vlan; 5770 bool vlan_filter; 5771 5772 if (!efx_dev_registered(efx)) 5773 return; 5774 5775 if (!table) 5776 return; 5777 5778 efx_ef10_filter_mark_old(efx); 5779 5780 /* Copy/convert the address lists; add the primary station 5781 * address and broadcast address 5782 */ 5783 netif_addr_lock_bh(net_dev); 5784 efx_ef10_filter_uc_addr_list(efx); 5785 efx_ef10_filter_mc_addr_list(efx); 5786 netif_addr_unlock_bh(net_dev); 5787 5788 /* If VLAN filtering changes, all old filters are finally removed. 5789 * Do it in advance to avoid conflicts for unicast untagged and 5790 * VLAN 0 tagged filters. 
5791 */ 5792 vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); 5793 if (table->vlan_filter != vlan_filter) { 5794 table->vlan_filter = vlan_filter; 5795 efx_ef10_filter_remove_old(efx); 5796 } 5797 5798 list_for_each_entry(vlan, &table->vlan_list, list) 5799 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5800 5801 efx_ef10_filter_remove_old(efx); 5802 table->mc_promisc_last = table->mc_promisc; 5803 } 5804 5805 static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) 5806 { 5807 struct efx_ef10_filter_table *table = efx->filter_state; 5808 struct efx_ef10_filter_vlan *vlan; 5809 5810 WARN_ON(!rwsem_is_locked(&efx->filter_sem)); 5811 5812 list_for_each_entry(vlan, &table->vlan_list, list) { 5813 if (vlan->vid == vid) 5814 return vlan; 5815 } 5816 5817 return NULL; 5818 } 5819 5820 static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) 5821 { 5822 struct efx_ef10_filter_table *table = efx->filter_state; 5823 struct efx_ef10_filter_vlan *vlan; 5824 unsigned int i; 5825 5826 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5827 return -EINVAL; 5828 5829 vlan = efx_ef10_filter_find_vlan(efx, vid); 5830 if (WARN_ON(vlan)) { 5831 netif_err(efx, drv, efx->net_dev, 5832 "VLAN %u already added\n", vid); 5833 return -EALREADY; 5834 } 5835 5836 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); 5837 if (!vlan) 5838 return -ENOMEM; 5839 5840 vlan->vid = vid; 5841 5842 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5843 vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; 5844 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5845 vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; 5846 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5847 vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID; 5848 5849 list_add_tail(&vlan->list, &table->vlan_list); 5850 5851 if (efx_dev_registered(efx)) 5852 efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); 5853 5854 return 0; 5855 } 5856 5857 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, 5858 struct efx_ef10_filter_vlan *vlan) 5859 { 5860 unsigned int i; 5861 5862 /* See comment in efx_ef10_filter_table_remove() */ 5863 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5864 return; 5865 5866 list_del(&vlan->list); 5867 5868 for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) 5869 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5870 vlan->uc[i]); 5871 for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) 5872 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5873 vlan->mc[i]); 5874 for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++) 5875 if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID) 5876 efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, 5877 vlan->default_filters[i]); 5878 5879 kfree(vlan); 5880 } 5881 5882 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) 5883 { 5884 struct efx_ef10_filter_vlan *vlan; 5885 5886 /* See comment in efx_ef10_filter_table_remove() */ 5887 if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) 5888 return; 5889 5890 vlan = efx_ef10_filter_find_vlan(efx, vid); 5891 if (!vlan) { 5892 netif_err(efx, drv, efx->net_dev, 5893 "VLAN %u not found in filter state\n", vid); 5894 return; 5895 } 5896 5897 efx_ef10_filter_del_vlan_internal(efx, vlan); 5898 } 5899 5900 static int efx_ef10_set_mac_address(struct efx_nic *efx) 5901 { 5902 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN); 5903 struct efx_ef10_nic_data *nic_data = efx->nic_data; 5904 bool was_enabled = efx->port_enabled; 5905 int rc; 5906 5907 efx_device_detach_sync(efx); 5908 efx_net_stop(efx->net_dev); 
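	/* The datapath is stopped at this point, so drop the filter table
	 * (holding filter_sem for write), ask the MC to change the vadaptor
	 * MAC via MC_CMD_VADAPTOR_SET_MAC, then re-probe the filters and
	 * reopen the device if it was previously running.
	 */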
5909 5910 mutex_lock(&efx->mac_lock); 5911 down_write(&efx->filter_sem); 5912 efx_ef10_filter_table_remove(efx); 5913 5914 ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), 5915 efx->net_dev->dev_addr); 5916 MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, 5917 nic_data->vport_id); 5918 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, 5919 sizeof(inbuf), NULL, 0, NULL); 5920 5921 efx_ef10_filter_table_probe(efx); 5922 up_write(&efx->filter_sem); 5923 mutex_unlock(&efx->mac_lock); 5924 5925 if (was_enabled) 5926 efx_net_open(efx->net_dev); 5927 efx_device_attach_if_not_resetting(efx); 5928 5929 #ifdef CONFIG_SFC_SRIOV 5930 if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { 5931 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; 5932 5933 if (rc == -EPERM) { 5934 struct efx_nic *efx_pf; 5935 5936 /* Switch to PF and change MAC address on vport */ 5937 efx_pf = pci_get_drvdata(pci_dev_pf); 5938 5939 rc = efx_ef10_sriov_set_vf_mac(efx_pf, 5940 nic_data->vf_index, 5941 efx->net_dev->dev_addr); 5942 } else if (!rc) { 5943 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); 5944 struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; 5945 unsigned int i; 5946 5947 /* MAC address successfully changed by VF (with MAC 5948 * spoofing) so update the parent PF if possible. 5949 */ 5950 for (i = 0; i < efx_pf->vf_count; ++i) { 5951 struct ef10_vf *vf = nic_data->vf + i; 5952 5953 if (vf->efx == efx) { 5954 ether_addr_copy(vf->mac, 5955 efx->net_dev->dev_addr); 5956 return 0; 5957 } 5958 } 5959 } 5960 } else 5961 #endif 5962 if (rc == -EPERM) { 5963 netif_err(efx, drv, efx->net_dev, 5964 "Cannot change MAC address; use sfboot to enable" 5965 " mac-spoofing on this interface\n"); 5966 } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) { 5967 /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC 5968 * fall-back to the method of changing the MAC address on the 5969 * vport. This only applies to PFs because such versions of 5970 * MCFW do not support VFs. 5971 */ 5972 rc = efx_ef10_vport_set_mac_address(efx); 5973 } else if (rc) { 5974 efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, 5975 sizeof(inbuf), NULL, 0, rc); 5976 } 5977 5978 return rc; 5979 } 5980 5981 static int efx_ef10_mac_reconfigure(struct efx_nic *efx) 5982 { 5983 efx_ef10_filter_sync_rx_mode(efx); 5984 5985 return efx_mcdi_set_mac(efx); 5986 } 5987 5988 static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) 5989 { 5990 efx_ef10_filter_sync_rx_mode(efx); 5991 5992 return 0; 5993 } 5994 5995 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) 5996 { 5997 MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); 5998 5999 MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type); 6000 return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf), 6001 NULL, 0, NULL); 6002 } 6003 6004 /* MC BISTs follow a different poll mechanism to phy BISTs. 6005 * The BIST is done in the poll handler on the MC, and the MCDI command 6006 * will block until the BIST is done. 
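 * efx_ef10_poll_bist() therefore makes a single synchronous MC_CMD_POLL_BIST
 * call and decodes POLL_BIST_OUT_RESULT from its response.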
6007 */ 6008 static int efx_ef10_poll_bist(struct efx_nic *efx) 6009 { 6010 int rc; 6011 MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN); 6012 size_t outlen; 6013 u32 result; 6014 6015 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, 6016 outbuf, sizeof(outbuf), &outlen); 6017 if (rc != 0) 6018 return rc; 6019 6020 if (outlen < MC_CMD_POLL_BIST_OUT_LEN) 6021 return -EIO; 6022 6023 result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT); 6024 switch (result) { 6025 case MC_CMD_POLL_BIST_PASSED: 6026 netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n"); 6027 return 0; 6028 case MC_CMD_POLL_BIST_TIMEOUT: 6029 netif_err(efx, hw, efx->net_dev, "BIST timed out\n"); 6030 return -EIO; 6031 case MC_CMD_POLL_BIST_FAILED: 6032 netif_err(efx, hw, efx->net_dev, "BIST failed.\n"); 6033 return -EIO; 6034 default: 6035 netif_err(efx, hw, efx->net_dev, 6036 "BIST returned unknown result %u", result); 6037 return -EIO; 6038 } 6039 } 6040 6041 static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type) 6042 { 6043 int rc; 6044 6045 netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type); 6046 6047 rc = efx_ef10_start_bist(efx, bist_type); 6048 if (rc != 0) 6049 return rc; 6050 6051 return efx_ef10_poll_bist(efx); 6052 } 6053 6054 static int 6055 efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 6056 { 6057 int rc, rc2; 6058 6059 efx_reset_down(efx, RESET_TYPE_WORLD); 6060 6061 rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST, 6062 NULL, 0, NULL, 0, NULL); 6063 if (rc != 0) 6064 goto out; 6065 6066 tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1; 6067 tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1; 6068 6069 rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD); 6070 6071 out: 6072 if (rc == -EPERM) 6073 rc = 0; 6074 rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0); 6075 return rc ? 
rc : rc2; 6076 } 6077 6078 #ifdef CONFIG_SFC_MTD 6079 6080 struct efx_ef10_nvram_type_info { 6081 u16 type, type_mask; 6082 u8 port; 6083 const char *name; 6084 }; 6085 6086 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { 6087 { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" }, 6088 { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" }, 6089 { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" }, 6090 { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" }, 6091 { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" }, 6092 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" }, 6093 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" }, 6094 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" }, 6095 { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, 6096 { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, 6097 { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, 6098 }; 6099 6100 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, 6101 struct efx_mcdi_mtd_partition *part, 6102 unsigned int type) 6103 { 6104 MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN); 6105 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX); 6106 const struct efx_ef10_nvram_type_info *info; 6107 size_t size, erase_size, outlen; 6108 bool protected; 6109 int rc; 6110 6111 for (info = efx_ef10_nvram_types; ; info++) { 6112 if (info == 6113 efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types)) 6114 return -ENODEV; 6115 if ((type & ~info->type_mask) == info->type) 6116 break; 6117 } 6118 if (info->port != efx_port_num(efx)) 6119 return -ENODEV; 6120 6121 rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected); 6122 if (rc) 6123 return rc; 6124 if (protected) 6125 return -ENODEV; /* hide it */ 6126 6127 part->nvram_type = type; 6128 6129 MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type); 6130 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf), 6131 outbuf, sizeof(outbuf), &outlen); 6132 if (rc) 6133 return rc; 6134 if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) 6135 return -EIO; 6136 if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) & 6137 (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN)) 6138 part->fw_subtype = MCDI_DWORD(outbuf, 6139 NVRAM_METADATA_OUT_SUBTYPE); 6140 6141 part->common.dev_type_name = "EF10 NVRAM manager"; 6142 part->common.type_name = info->name; 6143 6144 part->common.mtd.type = MTD_NORFLASH; 6145 part->common.mtd.flags = MTD_CAP_NORFLASH; 6146 part->common.mtd.size = size; 6147 part->common.mtd.erasesize = erase_size; 6148 6149 return 0; 6150 } 6151 6152 static int efx_ef10_mtd_probe(struct efx_nic *efx) 6153 { 6154 MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); 6155 struct efx_mcdi_mtd_partition *parts; 6156 size_t outlen, n_parts_total, i, n_parts; 6157 unsigned int type; 6158 int rc; 6159 6160 ASSERT_RTNL(); 6161 6162 BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); 6163 rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, 6164 outbuf, sizeof(outbuf), &outlen); 6165 if (rc) 6166 return rc; 6167 if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) 6168 return -EIO; 6169 6170 n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); 6171 if (n_parts_total > 6172 MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID)) 6173 return -EIO; 6174 6175 parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL); 6176 if (!parts) 6177 return -ENOMEM; 6178 6179 n_parts = 0; 6180 for (i = 
0; i < n_parts_total; i++) { 6181 type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID, 6182 i); 6183 rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type); 6184 if (rc == 0) 6185 n_parts++; 6186 else if (rc != -ENODEV) 6187 goto fail; 6188 } 6189 6190 rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts)); 6191 fail: 6192 if (rc) 6193 kfree(parts); 6194 return rc; 6195 } 6196 6197 #endif /* CONFIG_SFC_MTD */ 6198 6199 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time) 6200 { 6201 _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD); 6202 } 6203 6204 static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx, 6205 u32 host_time) {} 6206 6207 static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel, 6208 bool temp) 6209 { 6210 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN); 6211 int rc; 6212 6213 if (channel->sync_events_state == SYNC_EVENTS_REQUESTED || 6214 channel->sync_events_state == SYNC_EVENTS_VALID || 6215 (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED)) 6216 return 0; 6217 channel->sync_events_state = SYNC_EVENTS_REQUESTED; 6218 6219 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE); 6220 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6221 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE, 6222 channel->channel); 6223 6224 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6225 inbuf, sizeof(inbuf), NULL, 0, NULL); 6226 6227 if (rc != 0) 6228 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 6229 SYNC_EVENTS_DISABLED; 6230 6231 return rc; 6232 } 6233 6234 static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel, 6235 bool temp) 6236 { 6237 MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN); 6238 int rc; 6239 6240 if (channel->sync_events_state == SYNC_EVENTS_DISABLED || 6241 (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT)) 6242 return 0; 6243 if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) { 6244 channel->sync_events_state = SYNC_EVENTS_DISABLED; 6245 return 0; 6246 } 6247 channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT : 6248 SYNC_EVENTS_DISABLED; 6249 6250 MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE); 6251 MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0); 6252 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL, 6253 MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE); 6254 MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE, 6255 channel->channel); 6256 6257 rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP, 6258 inbuf, sizeof(inbuf), NULL, 0, NULL); 6259 6260 return rc; 6261 } 6262 6263 static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en, 6264 bool temp) 6265 { 6266 int (*set)(struct efx_channel *channel, bool temp); 6267 struct efx_channel *channel; 6268 6269 set = en ? 
6270 efx_ef10_rx_enable_timestamping : 6271 efx_ef10_rx_disable_timestamping; 6272 6273 channel = efx_ptp_channel(efx); 6274 if (channel) { 6275 int rc = set(channel, temp); 6276 if (en && rc != 0) { 6277 efx_ef10_ptp_set_ts_sync_events(efx, false, temp); 6278 return rc; 6279 } 6280 } 6281 6282 return 0; 6283 } 6284 6285 static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx, 6286 struct hwtstamp_config *init) 6287 { 6288 return -EOPNOTSUPP; 6289 } 6290 6291 static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx, 6292 struct hwtstamp_config *init) 6293 { 6294 int rc; 6295 6296 switch (init->rx_filter) { 6297 case HWTSTAMP_FILTER_NONE: 6298 efx_ef10_ptp_set_ts_sync_events(efx, false, false); 6299 /* if TX timestamping is still requested then leave PTP on */ 6300 return efx_ptp_change_mode(efx, 6301 init->tx_type != HWTSTAMP_TX_OFF, 0); 6302 case HWTSTAMP_FILTER_ALL: 6303 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 6304 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 6305 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 6306 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 6307 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 6308 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 6309 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 6310 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 6311 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 6312 case HWTSTAMP_FILTER_PTP_V2_EVENT: 6313 case HWTSTAMP_FILTER_PTP_V2_SYNC: 6314 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 6315 case HWTSTAMP_FILTER_NTP_ALL: 6316 init->rx_filter = HWTSTAMP_FILTER_ALL; 6317 rc = efx_ptp_change_mode(efx, true, 0); 6318 if (!rc) 6319 rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false); 6320 if (rc) 6321 efx_ptp_change_mode(efx, false, 0); 6322 return rc; 6323 default: 6324 return -ERANGE; 6325 } 6326 } 6327 6328 static int efx_ef10_get_phys_port_id(struct efx_nic *efx, 6329 struct netdev_phys_item_id *ppid) 6330 { 6331 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6332 6333 if (!is_valid_ether_addr(nic_data->port_id)) 6334 return -EOPNOTSUPP; 6335 6336 ppid->id_len = ETH_ALEN; 6337 memcpy(ppid->id, nic_data->port_id, ppid->id_len); 6338 6339 return 0; 6340 } 6341 6342 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6343 { 6344 if (proto != htons(ETH_P_8021Q)) 6345 return -EINVAL; 6346 6347 return efx_ef10_add_vlan(efx, vid); 6348 } 6349 6350 static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) 6351 { 6352 if (proto != htons(ETH_P_8021Q)) 6353 return -EINVAL; 6354 6355 return efx_ef10_del_vlan(efx, vid); 6356 } 6357 6358 /* We rely on the MCDI wiping out our TX rings if it made any changes to the 6359 * ports table, ensuring that any TSO descriptors that were made on a now- 6360 * removed tunnel port will be blown away and won't break things when we try 6361 * to transmit them using the new ports table. 
6362 */ 6363 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) 6364 { 6365 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6366 MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX); 6367 MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN); 6368 bool will_reset = false; 6369 size_t num_entries = 0; 6370 size_t inlen, outlen; 6371 size_t i; 6372 int rc; 6373 efx_dword_t flags_and_num_entries; 6374 6375 WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock)); 6376 6377 nic_data->udp_tunnels_dirty = false; 6378 6379 if (!(nic_data->datapath_caps & 6380 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) { 6381 efx_device_attach_if_not_resetting(efx); 6382 return 0; 6383 } 6384 6385 BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) > 6386 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 6387 6388 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6389 if (nic_data->udp_tunnels[i].count && 6390 nic_data->udp_tunnels[i].port) { 6391 efx_dword_t entry; 6392 6393 EFX_POPULATE_DWORD_2(entry, 6394 TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT, 6395 ntohs(nic_data->udp_tunnels[i].port), 6396 TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL, 6397 nic_data->udp_tunnels[i].type); 6398 *_MCDI_ARRAY_DWORD(inbuf, 6399 SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES, 6400 num_entries++) = entry; 6401 } 6402 } 6403 6404 BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST - 6405 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 != 6406 EFX_WORD_1_LBN); 6407 BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 != 6408 EFX_WORD_1_WIDTH); 6409 EFX_POPULATE_DWORD_2(flags_and_num_entries, 6410 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 6411 !!unloading, 6412 EFX_WORD_1, num_entries); 6413 *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) = 6414 flags_and_num_entries; 6415 6416 inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries); 6417 6418 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, 6419 inbuf, inlen, outbuf, sizeof(outbuf), &outlen); 6420 if (rc == -EIO) { 6421 /* Most likely the MC rebooted due to another function also 6422 * setting its tunnel port list. Mark the tunnel port list as 6423 * dirty, so it will be pushed upon coming up from the reboot. 6424 */ 6425 nic_data->udp_tunnels_dirty = true; 6426 return 0; 6427 } 6428 6429 if (rc) { 6430 /* expected not available on unprivileged functions */ 6431 if (rc != -EPERM) 6432 netif_warn(efx, drv, efx->net_dev, 6433 "Unable to set UDP tunnel ports; rc=%d.\n", rc); 6434 } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) & 6435 (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) { 6436 netif_info(efx, drv, efx->net_dev, 6437 "Rebooting MC due to UDP tunnel port list change\n"); 6438 will_reset = true; 6439 if (unloading) 6440 /* Delay for the MC reset to complete. This will make 6441 * unloading other functions a bit smoother. This is a 6442 * race, but the other unload will work whichever way 6443 * it goes, this just avoids an unnecessary error 6444 * message. 6445 */ 6446 msleep(100); 6447 } 6448 if (!will_reset && !unloading) { 6449 /* The caller will have detached, relying on the MC reset to 6450 * trigger a re-attach. Since there won't be an MC reset, we 6451 * have to do the attach ourselves. 
6452 */ 6453 efx_device_attach_if_not_resetting(efx); 6454 } 6455 6456 return rc; 6457 } 6458 6459 static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) 6460 { 6461 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6462 int rc = 0; 6463 6464 mutex_lock(&nic_data->udp_tunnels_lock); 6465 if (nic_data->udp_tunnels_dirty) { 6466 /* Make sure all TX are stopped while we modify the table, else 6467 * we might race against an efx_features_check(). 6468 */ 6469 efx_device_detach_sync(efx); 6470 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6471 } 6472 mutex_unlock(&nic_data->udp_tunnels_lock); 6473 return rc; 6474 } 6475 6476 static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, 6477 __be16 port) 6478 { 6479 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6480 size_t i; 6481 6482 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { 6483 if (!nic_data->udp_tunnels[i].count) 6484 continue; 6485 if (nic_data->udp_tunnels[i].port == port) 6486 return &nic_data->udp_tunnels[i]; 6487 } 6488 return NULL; 6489 } 6490 6491 static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, 6492 struct efx_udp_tunnel tnl) 6493 { 6494 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6495 struct efx_udp_tunnel *match; 6496 char typebuf[8]; 6497 size_t i; 6498 int rc; 6499 6500 if (!(nic_data->datapath_caps & 6501 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6502 return 0; 6503 6504 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6505 netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", 6506 typebuf, ntohs(tnl.port)); 6507 6508 mutex_lock(&nic_data->udp_tunnels_lock); 6509 /* Make sure all TX are stopped while we add to the table, else we 6510 * might race against an efx_features_check(). 6511 */ 6512 efx_device_detach_sync(efx); 6513 6514 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6515 if (match != NULL) { 6516 if (match->type == tnl.type) { 6517 netif_dbg(efx, drv, efx->net_dev, 6518 "Referencing existing tunnel entry\n"); 6519 match->count++; 6520 /* No need to cause an MCDI update */ 6521 rc = 0; 6522 goto unlock_out; 6523 } 6524 efx_get_udp_tunnel_type_name(match->type, 6525 typebuf, sizeof(typebuf)); 6526 netif_dbg(efx, drv, efx->net_dev, 6527 "UDP port %d is already in use by %s\n", 6528 ntohs(tnl.port), typebuf); 6529 rc = -EEXIST; 6530 goto unlock_out; 6531 } 6532 6533 for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) 6534 if (!nic_data->udp_tunnels[i].count) { 6535 nic_data->udp_tunnels[i] = tnl; 6536 nic_data->udp_tunnels[i].count = 1; 6537 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6538 goto unlock_out; 6539 } 6540 6541 netif_dbg(efx, drv, efx->net_dev, 6542 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", 6543 typebuf, ntohs(tnl.port)); 6544 6545 rc = -ENOMEM; 6546 6547 unlock_out: 6548 mutex_unlock(&nic_data->udp_tunnels_lock); 6549 return rc; 6550 } 6551 6552 /* Called under the TX lock with the TX queue running, hence no-one can be 6553 * in the middle of updating the UDP tunnels table. However, they could 6554 * have tried and failed the MCDI, in which case they'll have set the dirty 6555 * flag before dropping their locks. 
6556 */ 6557 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) 6558 { 6559 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6560 6561 if (!(nic_data->datapath_caps & 6562 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6563 return false; 6564 6565 if (nic_data->udp_tunnels_dirty) 6566 /* SW table may not match HW state, so just assume we can't 6567 * use any UDP tunnel offloads. 6568 */ 6569 return false; 6570 6571 return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; 6572 } 6573 6574 static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, 6575 struct efx_udp_tunnel tnl) 6576 { 6577 struct efx_ef10_nic_data *nic_data = efx->nic_data; 6578 struct efx_udp_tunnel *match; 6579 char typebuf[8]; 6580 int rc; 6581 6582 if (!(nic_data->datapath_caps & 6583 (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) 6584 return 0; 6585 6586 efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); 6587 netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", 6588 typebuf, ntohs(tnl.port)); 6589 6590 mutex_lock(&nic_data->udp_tunnels_lock); 6591 /* Make sure all TX are stopped while we remove from the table, else we 6592 * might race against an efx_features_check(). 6593 */ 6594 efx_device_detach_sync(efx); 6595 6596 match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); 6597 if (match != NULL) { 6598 if (match->type == tnl.type) { 6599 if (--match->count) { 6600 /* Port is still in use, so nothing to do */ 6601 netif_dbg(efx, drv, efx->net_dev, 6602 "UDP tunnel port %d remains active\n", 6603 ntohs(tnl.port)); 6604 rc = 0; 6605 goto out_unlock; 6606 } 6607 rc = efx_ef10_set_udp_tnl_ports(efx, false); 6608 goto out_unlock; 6609 } 6610 efx_get_udp_tunnel_type_name(match->type, 6611 typebuf, sizeof(typebuf)); 6612 netif_warn(efx, drv, efx->net_dev, 6613 "UDP port %d is actually in use by %s, not removing\n", 6614 ntohs(tnl.port), typebuf); 6615 } 6616 rc = -ENOENT; 6617 6618 out_unlock: 6619 mutex_unlock(&nic_data->udp_tunnels_lock); 6620 return rc; 6621 } 6622 6623 #define EF10_OFFLOAD_FEATURES \ 6624 (NETIF_F_IP_CSUM | \ 6625 NETIF_F_HW_VLAN_CTAG_FILTER | \ 6626 NETIF_F_IPV6_CSUM | \ 6627 NETIF_F_RXHASH | \ 6628 NETIF_F_NTUPLE) 6629 6630 const struct efx_nic_type efx_hunt_a0_vf_nic_type = { 6631 .is_vf = true, 6632 .mem_bar = efx_ef10_vf_mem_bar, 6633 .mem_map_size = efx_ef10_mem_map_size, 6634 .probe = efx_ef10_probe_vf, 6635 .remove = efx_ef10_remove, 6636 .dimension_resources = efx_ef10_dimension_resources, 6637 .init = efx_ef10_init_nic, 6638 .fini = efx_port_dummy_op_void, 6639 .map_reset_reason = efx_ef10_map_reset_reason, 6640 .map_reset_flags = efx_ef10_map_reset_flags, 6641 .reset = efx_ef10_reset, 6642 .probe_port = efx_mcdi_port_probe, 6643 .remove_port = efx_mcdi_port_remove, 6644 .fini_dmaq = efx_ef10_fini_dmaq, 6645 .prepare_flr = efx_ef10_prepare_flr, 6646 .finish_flr = efx_port_dummy_op_void, 6647 .describe_stats = efx_ef10_describe_stats, 6648 .update_stats = efx_ef10_update_stats_vf, 6649 .start_stats = efx_port_dummy_op_void, 6650 .pull_stats = efx_port_dummy_op_void, 6651 .stop_stats = efx_port_dummy_op_void, 6652 .set_id_led = efx_mcdi_set_id_led, 6653 .push_irq_moderation = efx_ef10_push_irq_moderation, 6654 .reconfigure_mac = efx_ef10_mac_reconfigure_vf, 6655 .check_mac_fault = efx_mcdi_mac_check_fault, 6656 .reconfigure_port = efx_mcdi_port_reconfigure, 6657 .get_wol = efx_ef10_get_wol_vf, 6658 .set_wol = efx_ef10_set_wol_vf, 6659 .resume_wol = efx_port_dummy_op_void, 6660 .mcdi_request = efx_ef10_mcdi_request, 6661 
.mcdi_poll_response = efx_ef10_mcdi_poll_response, 6662 .mcdi_read_response = efx_ef10_mcdi_read_response, 6663 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6664 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6665 .irq_enable_master = efx_port_dummy_op_void, 6666 .irq_test_generate = efx_ef10_irq_test_generate, 6667 .irq_disable_non_ev = efx_port_dummy_op_void, 6668 .irq_handle_msi = efx_ef10_msi_interrupt, 6669 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6670 .tx_probe = efx_ef10_tx_probe, 6671 .tx_init = efx_ef10_tx_init, 6672 .tx_remove = efx_ef10_tx_remove, 6673 .tx_write = efx_ef10_tx_write, 6674 .tx_limit_len = efx_ef10_tx_limit_len, 6675 .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config, 6676 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6677 .rx_probe = efx_ef10_rx_probe, 6678 .rx_init = efx_ef10_rx_init, 6679 .rx_remove = efx_ef10_rx_remove, 6680 .rx_write = efx_ef10_rx_write, 6681 .rx_defer_refill = efx_ef10_rx_defer_refill, 6682 .ev_probe = efx_ef10_ev_probe, 6683 .ev_init = efx_ef10_ev_init, 6684 .ev_fini = efx_ef10_ev_fini, 6685 .ev_remove = efx_ef10_ev_remove, 6686 .ev_process = efx_ef10_ev_process, 6687 .ev_read_ack = efx_ef10_ev_read_ack, 6688 .ev_test_generate = efx_ef10_ev_test_generate, 6689 .filter_table_probe = efx_ef10_filter_table_probe, 6690 .filter_table_restore = efx_ef10_filter_table_restore, 6691 .filter_table_remove = efx_ef10_filter_table_remove, 6692 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6693 .filter_insert = efx_ef10_filter_insert, 6694 .filter_remove_safe = efx_ef10_filter_remove_safe, 6695 .filter_get_safe = efx_ef10_filter_get_safe, 6696 .filter_clear_rx = efx_ef10_filter_clear_rx, 6697 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6698 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6699 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6700 #ifdef CONFIG_RFS_ACCEL 6701 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 6702 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6703 #endif 6704 #ifdef CONFIG_SFC_MTD 6705 .mtd_probe = efx_port_dummy_op_int, 6706 #endif 6707 .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, 6708 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, 6709 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6710 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6711 #ifdef CONFIG_SFC_SRIOV 6712 .vswitching_probe = efx_ef10_vswitching_probe_vf, 6713 .vswitching_restore = efx_ef10_vswitching_restore_vf, 6714 .vswitching_remove = efx_ef10_vswitching_remove_vf, 6715 #endif 6716 .get_mac_address = efx_ef10_get_mac_address_vf, 6717 .set_mac_address = efx_ef10_set_mac_address, 6718 6719 .get_phys_port_id = efx_ef10_get_phys_port_id, 6720 .revision = EFX_REV_HUNT_A0, 6721 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6722 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6723 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6724 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6725 .can_rx_scatter = true, 6726 .always_rx_scatter = true, 6727 .min_interrupt_mode = EFX_INT_MODE_MSIX, 6728 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6729 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6730 .offload_features = EF10_OFFLOAD_FEATURES, 6731 .mcdi_max_ver = 2, 6732 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6733 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6734 1 << HWTSTAMP_FILTER_ALL, 6735 .rx_hash_key_size = 40, 6736 }; 6737 6738 const struct efx_nic_type efx_hunt_a0_nic_type = { 6739 .is_vf = false, 6740 .mem_bar = efx_ef10_pf_mem_bar, 6741 .mem_map_size = 
efx_ef10_mem_map_size, 6742 .probe = efx_ef10_probe_pf, 6743 .remove = efx_ef10_remove, 6744 .dimension_resources = efx_ef10_dimension_resources, 6745 .init = efx_ef10_init_nic, 6746 .fini = efx_port_dummy_op_void, 6747 .map_reset_reason = efx_ef10_map_reset_reason, 6748 .map_reset_flags = efx_ef10_map_reset_flags, 6749 .reset = efx_ef10_reset, 6750 .probe_port = efx_mcdi_port_probe, 6751 .remove_port = efx_mcdi_port_remove, 6752 .fini_dmaq = efx_ef10_fini_dmaq, 6753 .prepare_flr = efx_ef10_prepare_flr, 6754 .finish_flr = efx_port_dummy_op_void, 6755 .describe_stats = efx_ef10_describe_stats, 6756 .update_stats = efx_ef10_update_stats_pf, 6757 .start_stats = efx_mcdi_mac_start_stats, 6758 .pull_stats = efx_mcdi_mac_pull_stats, 6759 .stop_stats = efx_mcdi_mac_stop_stats, 6760 .set_id_led = efx_mcdi_set_id_led, 6761 .push_irq_moderation = efx_ef10_push_irq_moderation, 6762 .reconfigure_mac = efx_ef10_mac_reconfigure, 6763 .check_mac_fault = efx_mcdi_mac_check_fault, 6764 .reconfigure_port = efx_mcdi_port_reconfigure, 6765 .get_wol = efx_ef10_get_wol, 6766 .set_wol = efx_ef10_set_wol, 6767 .resume_wol = efx_port_dummy_op_void, 6768 .test_chip = efx_ef10_test_chip, 6769 .test_nvram = efx_mcdi_nvram_test_all, 6770 .mcdi_request = efx_ef10_mcdi_request, 6771 .mcdi_poll_response = efx_ef10_mcdi_poll_response, 6772 .mcdi_read_response = efx_ef10_mcdi_read_response, 6773 .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot, 6774 .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected, 6775 .irq_enable_master = efx_port_dummy_op_void, 6776 .irq_test_generate = efx_ef10_irq_test_generate, 6777 .irq_disable_non_ev = efx_port_dummy_op_void, 6778 .irq_handle_msi = efx_ef10_msi_interrupt, 6779 .irq_handle_legacy = efx_ef10_legacy_interrupt, 6780 .tx_probe = efx_ef10_tx_probe, 6781 .tx_init = efx_ef10_tx_init, 6782 .tx_remove = efx_ef10_tx_remove, 6783 .tx_write = efx_ef10_tx_write, 6784 .tx_limit_len = efx_ef10_tx_limit_len, 6785 .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config, 6786 .rx_pull_rss_config = efx_ef10_rx_pull_rss_config, 6787 .rx_probe = efx_ef10_rx_probe, 6788 .rx_init = efx_ef10_rx_init, 6789 .rx_remove = efx_ef10_rx_remove, 6790 .rx_write = efx_ef10_rx_write, 6791 .rx_defer_refill = efx_ef10_rx_defer_refill, 6792 .ev_probe = efx_ef10_ev_probe, 6793 .ev_init = efx_ef10_ev_init, 6794 .ev_fini = efx_ef10_ev_fini, 6795 .ev_remove = efx_ef10_ev_remove, 6796 .ev_process = efx_ef10_ev_process, 6797 .ev_read_ack = efx_ef10_ev_read_ack, 6798 .ev_test_generate = efx_ef10_ev_test_generate, 6799 .filter_table_probe = efx_ef10_filter_table_probe, 6800 .filter_table_restore = efx_ef10_filter_table_restore, 6801 .filter_table_remove = efx_ef10_filter_table_remove, 6802 .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter, 6803 .filter_insert = efx_ef10_filter_insert, 6804 .filter_remove_safe = efx_ef10_filter_remove_safe, 6805 .filter_get_safe = efx_ef10_filter_get_safe, 6806 .filter_clear_rx = efx_ef10_filter_clear_rx, 6807 .filter_count_rx_used = efx_ef10_filter_count_rx_used, 6808 .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit, 6809 .filter_get_rx_ids = efx_ef10_filter_get_rx_ids, 6810 #ifdef CONFIG_RFS_ACCEL 6811 .filter_rfs_insert = efx_ef10_filter_rfs_insert, 6812 .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one, 6813 #endif 6814 #ifdef CONFIG_SFC_MTD 6815 .mtd_probe = efx_ef10_mtd_probe, 6816 .mtd_rename = efx_mcdi_mtd_rename, 6817 .mtd_read = efx_mcdi_mtd_read, 6818 .mtd_erase = efx_mcdi_mtd_erase, 6819 .mtd_write = efx_mcdi_mtd_write, 6820 .mtd_sync = efx_mcdi_mtd_sync, 
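	/* MTD access is backed by MCDI NVRAM commands; the partitions are
	 * enumerated by efx_ef10_mtd_probe() above.
	 */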
6821 #endif 6822 .ptp_write_host_time = efx_ef10_ptp_write_host_time, 6823 .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, 6824 .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, 6825 .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, 6826 .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, 6827 .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, 6828 .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, 6829 .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, 6830 .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, 6831 #ifdef CONFIG_SFC_SRIOV 6832 .sriov_configure = efx_ef10_sriov_configure, 6833 .sriov_init = efx_ef10_sriov_init, 6834 .sriov_fini = efx_ef10_sriov_fini, 6835 .sriov_wanted = efx_ef10_sriov_wanted, 6836 .sriov_reset = efx_ef10_sriov_reset, 6837 .sriov_flr = efx_ef10_sriov_flr, 6838 .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac, 6839 .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan, 6840 .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk, 6841 .sriov_get_vf_config = efx_ef10_sriov_get_vf_config, 6842 .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state, 6843 .vswitching_probe = efx_ef10_vswitching_probe_pf, 6844 .vswitching_restore = efx_ef10_vswitching_restore_pf, 6845 .vswitching_remove = efx_ef10_vswitching_remove_pf, 6846 #endif 6847 .get_mac_address = efx_ef10_get_mac_address_pf, 6848 .set_mac_address = efx_ef10_set_mac_address, 6849 .tso_versions = efx_ef10_tso_versions, 6850 6851 .get_phys_port_id = efx_ef10_get_phys_port_id, 6852 .revision = EFX_REV_HUNT_A0, 6853 .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH), 6854 .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE, 6855 .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST, 6856 .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST, 6857 .can_rx_scatter = true, 6858 .always_rx_scatter = true, 6859 .option_descriptors = true, 6860 .min_interrupt_mode = EFX_INT_MODE_LEGACY, 6861 .max_interrupt_mode = EFX_INT_MODE_MSIX, 6862 .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, 6863 .offload_features = EF10_OFFLOAD_FEATURES, 6864 .mcdi_max_ver = 2, 6865 .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, 6866 .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | 6867 1 << HWTSTAMP_FILTER_ALL, 6868 .rx_hash_key_size = 40, 6869 }; 6870
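
/* Illustrative sketch, not part of this file: these efx_nic_type method
 * tables are bound to devices through the driver's PCI ID table (efx.c in
 * the mainline tree).  The device IDs below are examples only:
 *
 *	static const struct pci_device_id efx_pci_table[] = {
 *		{ PCI_DEVICE(0x1924, 0x0903),	// SFC9120 PF (example ID)
 *		  .driver_data = (unsigned long)&efx_hunt_a0_nic_type },
 *		{ PCI_DEVICE(0x1924, 0x1903),	// SFC9120 VF (example ID)
 *		  .driver_data = (unsigned long)&efx_hunt_a0_vf_nic_type },
 *		{0}				// end of list
 *	};
 */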