1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include <linux/cpu_rmap.h> 18 #include "net_driver.h" 19 #include "bitfield.h" 20 #include "efx.h" 21 #include "nic.h" 22 #include "farch_regs.h" 23 #include "io.h" 24 #include "workarounds.h" 25 26 /************************************************************************** 27 * 28 * Configurable values 29 * 30 ************************************************************************** 31 */ 32 33 /* This is set to 16 for a good reason. In summary, if larger than 34 * 16, the descriptor cache holds more than a default socket 35 * buffer's worth of packets (for UDP we can only have at most one 36 * socket buffer's worth outstanding). This combined with the fact 37 * that we only get 1 TX event per descriptor cache means the NIC 38 * goes idle. 39 */ 40 #define TX_DC_ENTRIES 16 41 #define TX_DC_ENTRIES_ORDER 1 42 43 #define RX_DC_ENTRIES 64 44 #define RX_DC_ENTRIES_ORDER 3 45 46 /* If EFX_MAX_INT_ERRORS internal errors occur within 47 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 48 * disable it. 49 */ 50 #define EFX_INT_ERROR_EXPIRE 3600 51 #define EFX_MAX_INT_ERRORS 5 52 53 /* Depth of RX flush request fifo */ 54 #define EFX_RX_FLUSH_COUNT 4 55 56 /* Driver generated events */ 57 #define _EFX_CHANNEL_MAGIC_TEST 0x000101 58 #define _EFX_CHANNEL_MAGIC_FILL 0x000102 59 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 60 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 61 62 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) 63 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) 64 65 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 66 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) 67 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ 68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ 69 efx_rx_queue_index(_rx_queue)) 70 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ 71 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ 72 efx_rx_queue_index(_rx_queue)) 73 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ 74 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ 75 (_tx_queue)->queue) 76 77 static void efx_magic_event(struct efx_channel *channel, u32 magic); 78 79 /************************************************************************** 80 * 81 * Solarstorm hardware access 82 * 83 **************************************************************************/ 84 85 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 86 unsigned int index) 87 { 88 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 89 value, index); 90 } 91 92 /* Read the current event from the event queue */ 93 static inline efx_qword_t *efx_event(struct efx_channel *channel, 94 unsigned int index) 95 { 96 return ((efx_qword_t *) (channel->eventq.buf.addr)) + 97 (index & channel->eventq_mask); 98 } 99 100 /* See if an event is present 101 * 102 * We check both the high and low dword of the event for all ones. 
We 103 * wrote all ones when we cleared the event, and no valid event can 104 * have all ones in either its high or low dwords. This approach is 105 * robust against reordering. 106 * 107 * Note that using a single 64-bit comparison is incorrect; even 108 * though the CPU read will be atomic, the DMA write may not be. 109 */ 110 static inline int efx_event_present(efx_qword_t *event) 111 { 112 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | 113 EFX_DWORD_IS_ALL_ONES(event->dword[1])); 114 } 115 116 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 117 const efx_oword_t *mask) 118 { 119 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || 120 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); 121 } 122 123 int efx_nic_test_registers(struct efx_nic *efx, 124 const struct efx_nic_register_test *regs, 125 size_t n_regs) 126 { 127 unsigned address = 0, i, j; 128 efx_oword_t mask, imask, original, reg, buf; 129 130 for (i = 0; i < n_regs; ++i) { 131 address = regs[i].address; 132 mask = imask = regs[i].mask; 133 EFX_INVERT_OWORD(imask); 134 135 efx_reado(efx, &original, address); 136 137 /* bit sweep on and off */ 138 for (j = 0; j < 128; j++) { 139 if (!EFX_EXTRACT_OWORD32(mask, j, j)) 140 continue; 141 142 /* Test this testable bit can be set in isolation */ 143 EFX_AND_OWORD(reg, original, mask); 144 EFX_SET_OWORD32(reg, j, j, 1); 145 146 efx_writeo(efx, &reg, address); 147 efx_reado(efx, &buf, address); 148 149 if (efx_masked_compare_oword(&reg, &buf, &mask)) 150 goto fail; 151 152 /* Test this testable bit can be cleared in isolation */ 153 EFX_OR_OWORD(reg, original, mask); 154 EFX_SET_OWORD32(reg, j, j, 0); 155 156 efx_writeo(efx, &reg, address); 157 efx_reado(efx, &buf, address); 158 159 if (efx_masked_compare_oword(&reg, &buf, &mask)) 160 goto fail; 161 } 162 163 efx_writeo(efx, &original, address); 164 } 165 166 return 0; 167 168 fail: 169 netif_err(efx, hw, efx->net_dev, 170 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT 171 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), 172 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); 173 return -EIO; 174 } 175 176 /************************************************************************** 177 * 178 * Special buffer handling 179 * Special buffers are used for event queues and the TX and RX 180 * descriptor rings. 181 * 182 *************************************************************************/ 183 184 /* 185 * Initialise a special buffer 186 * 187 * This will define a buffer (previously allocated via 188 * efx_alloc_special_buffer()) in the buffer table, allowing 189 * it to be used for event queues, descriptor rings etc.
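 *
 * A worked sizing example (illustrative numbers, not a hardware
 * requirement): a 512-entry event queue of 8-byte efx_qword_t events
 * occupies 4KB, i.e. one EFX_BUF_SIZE page and a single buffer table
 * entry, while a 4096-entry descriptor ring occupies 32KB and hence
 * eight consecutive entries starting at buffer->index.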
190 */ 191 static void 192 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 193 { 194 efx_qword_t buf_desc; 195 unsigned int index; 196 dma_addr_t dma_addr; 197 int i; 198 199 EFX_BUG_ON_PARANOID(!buffer->buf.addr); 200 201 /* Write buffer descriptors to NIC */ 202 for (i = 0; i < buffer->entries; i++) { 203 index = buffer->index + i; 204 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE); 205 netif_dbg(efx, probe, efx->net_dev, 206 "mapping special buffer %d at %llx\n", 207 index, (unsigned long long)dma_addr); 208 EFX_POPULATE_QWORD_3(buf_desc, 209 FRF_AZ_BUF_ADR_REGION, 0, 210 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 211 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 212 efx_write_buf_tbl(efx, &buf_desc, index); 213 } 214 } 215 216 /* Unmaps a buffer and clears the buffer table entries */ 217 static void 218 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 219 { 220 efx_oword_t buf_tbl_upd; 221 unsigned int start = buffer->index; 222 unsigned int end = (buffer->index + buffer->entries - 1); 223 224 if (!buffer->entries) 225 return; 226 227 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 228 buffer->index, buffer->index + buffer->entries - 1); 229 230 EFX_POPULATE_OWORD_4(buf_tbl_upd, 231 FRF_AZ_BUF_UPD_CMD, 0, 232 FRF_AZ_BUF_CLR_CMD, 1, 233 FRF_AZ_BUF_CLR_END_ID, end, 234 FRF_AZ_BUF_CLR_START_ID, start); 235 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 236 } 237 238 /* 239 * Allocate a new special buffer 240 * 241 * This allocates memory for a new buffer, clears it and allocates a 242 * new buffer ID range. It does not write into the buffer table. 243 * 244 * This call will allocate 4KB buffers, since 8KB buffers can't be 245 * used for event queues and descriptor rings. 246 */ 247 static int efx_alloc_special_buffer(struct efx_nic *efx, 248 struct efx_special_buffer *buffer, 249 unsigned int len) 250 { 251 len = ALIGN(len, EFX_BUF_SIZE); 252 253 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL)) 254 return -ENOMEM; 255 buffer->entries = len / EFX_BUF_SIZE; 256 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1)); 257 258 /* Select new buffer ID */ 259 buffer->index = efx->next_buffer_table; 260 efx->next_buffer_table += buffer->entries; 261 #ifdef CONFIG_SFC_SRIOV 262 BUG_ON(efx_sriov_enabled(efx) && 263 efx->vf_buftbl_base < efx->next_buffer_table); 264 #endif 265 266 netif_dbg(efx, probe, efx->net_dev, 267 "allocating special buffers %d-%d at %llx+%x " 268 "(virt %p phys %llx)\n", buffer->index, 269 buffer->index + buffer->entries - 1, 270 (u64)buffer->buf.dma_addr, len, 271 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); 272 273 return 0; 274 } 275 276 static void 277 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 278 { 279 if (!buffer->buf.addr) 280 return; 281 282 netif_dbg(efx, hw, efx->net_dev, 283 "deallocating special buffers %d-%d at %llx+%x " 284 "(virt %p phys %llx)\n", buffer->index, 285 buffer->index + buffer->entries - 1, 286 (u64)buffer->buf.dma_addr, buffer->buf.len, 287 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr)); 288 289 efx_nic_free_buffer(efx, &buffer->buf); 290 buffer->entries = 0; 291 } 292 293 /************************************************************************** 294 * 295 * Generic buffer handling 296 * These buffers are used for interrupt status, MAC stats, etc. 
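 * Typical usage (a sketch, not quoted from the callers): allocate with
 * efx_nic_alloc_buffer(efx, &buf, len, GFP_KERNEL), which returns a
 * zeroed coherent mapping; hand buf.dma_addr to the hardware and access
 * buf.addr from the CPU; release with efx_nic_free_buffer() once the
 * hardware can no longer DMA into it.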
297 * 298 **************************************************************************/ 299 300 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 301 unsigned int len, gfp_t gfp_flags) 302 { 303 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 304 &buffer->dma_addr, 305 gfp_flags | __GFP_ZERO); 306 if (!buffer->addr) 307 return -ENOMEM; 308 buffer->len = len; 309 return 0; 310 } 311 312 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) 313 { 314 if (buffer->addr) { 315 dma_free_coherent(&efx->pci_dev->dev, buffer->len, 316 buffer->addr, buffer->dma_addr); 317 buffer->addr = NULL; 318 } 319 } 320 321 /************************************************************************** 322 * 323 * TX path 324 * 325 **************************************************************************/ 326 327 /* Returns a pointer to the specified transmit descriptor in the TX 328 * descriptor queue belonging to the specified channel. 329 */ 330 static inline efx_qword_t * 331 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 332 { 333 return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; 334 } 335 336 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 337 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) 338 { 339 unsigned write_ptr; 340 efx_dword_t reg; 341 342 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 343 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 344 efx_writed_page(tx_queue->efx, &reg, 345 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 346 } 347 348 /* Write pointer and first descriptor for TX descriptor ring */ 349 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, 350 const efx_qword_t *txd) 351 { 352 unsigned write_ptr; 353 efx_oword_t reg; 354 355 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); 356 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); 357 358 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 359 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, 360 FRF_AZ_TX_DESC_WPTR, write_ptr); 361 reg.qword[0] = *txd; 362 efx_writeo_page(tx_queue->efx, &reg, 363 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); 364 } 365 366 static inline bool 367 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) 368 { 369 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); 370 371 if (empty_read_count == 0) 372 return false; 373 374 tx_queue->empty_read_count = 0; 375 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 376 && tx_queue->write_count - write_count == 1; 377 } 378 379 /* For each entry inserted into the software descriptor ring, create a 380 * descriptor in the hardware TX descriptor ring (in host memory), and 381 * write a doorbell.
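 *
 * Whether the first new descriptor is pushed in the doorbell write
 * itself is decided by efx_may_push_tx_desc() above: roughly, the queue
 * was empty the last time the completion path looked at it and exactly
 * one descriptor is being added, so the hardware can start the DMA
 * without a separate descriptor fetch. Otherwise only the write
 * pointer is written.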
382 */ 383 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) 384 { 385 386 struct efx_tx_buffer *buffer; 387 efx_qword_t *txd; 388 unsigned write_ptr; 389 unsigned old_write_count = tx_queue->write_count; 390 391 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 392 393 do { 394 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 395 buffer = &tx_queue->buffer[write_ptr]; 396 txd = efx_tx_desc(tx_queue, write_ptr); 397 ++tx_queue->write_count; 398 399 /* Create TX descriptor ring entry */ 400 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 401 EFX_POPULATE_QWORD_4(*txd, 402 FSF_AZ_TX_KER_CONT, 403 buffer->flags & EFX_TX_BUF_CONT, 404 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 405 FSF_AZ_TX_KER_BUF_REGION, 0, 406 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 407 } while (tx_queue->write_count != tx_queue->insert_count); 408 409 wmb(); /* Ensure descriptors are written before they are fetched */ 410 411 if (efx_may_push_tx_desc(tx_queue, old_write_count)) { 412 txd = efx_tx_desc(tx_queue, 413 old_write_count & tx_queue->ptr_mask); 414 efx_push_tx_desc(tx_queue, txd); 415 ++tx_queue->pushes; 416 } else { 417 efx_notify_tx_desc(tx_queue); 418 } 419 } 420 421 /* Allocate hardware resources for a TX queue */ 422 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 423 { 424 struct efx_nic *efx = tx_queue->efx; 425 unsigned entries; 426 427 entries = tx_queue->ptr_mask + 1; 428 return efx_alloc_special_buffer(efx, &tx_queue->txd, 429 entries * sizeof(efx_qword_t)); 430 } 431 432 void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 433 { 434 struct efx_nic *efx = tx_queue->efx; 435 efx_oword_t reg; 436 437 /* Pin TX descriptor ring */ 438 efx_init_special_buffer(efx, &tx_queue->txd); 439 440 /* Push TX descriptor ring to card */ 441 EFX_POPULATE_OWORD_10(reg, 442 FRF_AZ_TX_DESCQ_EN, 1, 443 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 444 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 445 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 446 FRF_AZ_TX_DESCQ_EVQ_ID, 447 tx_queue->channel->channel, 448 FRF_AZ_TX_DESCQ_OWNER_ID, 0, 449 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, 450 FRF_AZ_TX_DESCQ_SIZE, 451 __ffs(tx_queue->txd.entries), 452 FRF_AZ_TX_DESCQ_TYPE, 0, 453 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 454 455 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 456 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 457 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 458 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, 459 !csum); 460 } 461 462 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, 463 tx_queue->queue); 464 465 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 466 /* Only 128 bits in this register */ 467 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 468 469 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); 470 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 471 __clear_bit_le(tx_queue->queue, &reg); 472 else 473 __set_bit_le(tx_queue->queue, &reg); 474 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); 475 } 476 477 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 478 EFX_POPULATE_OWORD_1(reg, 479 FRF_BZ_TX_PACE, 480 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
481 FFE_BZ_TX_PACE_OFF : 482 FFE_BZ_TX_PACE_RESERVED); 483 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, 484 tx_queue->queue); 485 } 486 } 487 488 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 489 { 490 struct efx_nic *efx = tx_queue->efx; 491 efx_oword_t tx_flush_descq; 492 493 WARN_ON(atomic_read(&tx_queue->flush_outstanding)); 494 atomic_set(&tx_queue->flush_outstanding, 1); 495 496 EFX_POPULATE_OWORD_2(tx_flush_descq, 497 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 498 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 499 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 500 } 501 502 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) 503 { 504 struct efx_nic *efx = tx_queue->efx; 505 efx_oword_t tx_desc_ptr; 506 507 /* Remove TX descriptor ring from card */ 508 EFX_ZERO_OWORD(tx_desc_ptr); 509 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 510 tx_queue->queue); 511 512 /* Unpin TX descriptor ring */ 513 efx_fini_special_buffer(efx, &tx_queue->txd); 514 } 515 516 /* Free buffers backing TX queue */ 517 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) 518 { 519 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 520 } 521 522 /************************************************************************** 523 * 524 * RX path 525 * 526 **************************************************************************/ 527 528 /* Returns a pointer to the specified descriptor in the RX descriptor queue */ 529 static inline efx_qword_t * 530 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 531 { 532 return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; 533 } 534 535 /* This creates an entry in the RX descriptor queue */ 536 static inline void 537 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 538 { 539 struct efx_rx_buffer *rx_buf; 540 efx_qword_t *rxd; 541 542 rxd = efx_rx_desc(rx_queue, index); 543 rx_buf = efx_rx_buffer(rx_queue, index); 544 EFX_POPULATE_QWORD_3(*rxd, 545 FSF_AZ_RX_KER_BUF_SIZE, 546 rx_buf->len - 547 rx_queue->efx->type->rx_buffer_padding, 548 FSF_AZ_RX_KER_BUF_REGION, 0, 549 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 550 } 551 552 /* This writes to the RX_DESC_WPTR register for the specified receive 553 * descriptor ring. 554 */ 555 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 556 { 557 struct efx_nic *efx = rx_queue->efx; 558 efx_dword_t reg; 559 unsigned write_ptr; 560 561 while (rx_queue->notified_count != rx_queue->added_count) { 562 efx_build_rx_desc( 563 rx_queue, 564 rx_queue->notified_count & rx_queue->ptr_mask); 565 ++rx_queue->notified_count; 566 } 567 568 wmb(); 569 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 570 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 571 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, 572 efx_rx_queue_index(rx_queue)); 573 } 574 575 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 576 { 577 struct efx_nic *efx = rx_queue->efx; 578 unsigned entries; 579 580 entries = rx_queue->ptr_mask + 1; 581 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 582 entries * sizeof(efx_qword_t)); 583 } 584 585 void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 586 { 587 efx_oword_t rx_desc_ptr; 588 struct efx_nic *efx = rx_queue->efx; 589 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 590 bool iscsi_digest_en = is_b0; 591 bool jumbo_en; 592 593 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables 594 * DMA to continue after a PCIe page boundary (and scattering 595 * is not possible). In Falcon B0 and Siena, it enables 596 * scatter.
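 *
 * In truth-table form (restating the assignment below): A1 kernel
 * queues always set JUMBO, since scatter is unavailable there; B0 and
 * Siena set it only when efx->rx_scatter is enabled.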
597 */ 598 jumbo_en = !is_b0 || efx->rx_scatter; 599 600 netif_dbg(efx, hw, efx->net_dev, 601 "RX queue %d ring in special buffers %d-%d\n", 602 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 603 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 604 605 rx_queue->scatter_n = 0; 606 607 /* Pin RX descriptor ring */ 608 efx_init_special_buffer(efx, &rx_queue->rxd); 609 610 /* Push RX descriptor ring to card */ 611 EFX_POPULATE_OWORD_10(rx_desc_ptr, 612 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, 613 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 614 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 615 FRF_AZ_RX_DESCQ_EVQ_ID, 616 efx_rx_queue_channel(rx_queue)->channel, 617 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 618 FRF_AZ_RX_DESCQ_LABEL, 619 efx_rx_queue_index(rx_queue), 620 FRF_AZ_RX_DESCQ_SIZE, 621 __ffs(rx_queue->rxd.entries), 622 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 623 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, 624 FRF_AZ_RX_DESCQ_EN, 1); 625 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 626 efx_rx_queue_index(rx_queue)); 627 } 628 629 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 630 { 631 struct efx_nic *efx = rx_queue->efx; 632 efx_oword_t rx_flush_descq; 633 634 EFX_POPULATE_OWORD_2(rx_flush_descq, 635 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 636 FRF_AZ_RX_FLUSH_DESCQ, 637 efx_rx_queue_index(rx_queue)); 638 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 639 } 640 641 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) 642 { 643 efx_oword_t rx_desc_ptr; 644 struct efx_nic *efx = rx_queue->efx; 645 646 /* Remove RX descriptor ring from card */ 647 EFX_ZERO_OWORD(rx_desc_ptr); 648 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 649 efx_rx_queue_index(rx_queue)); 650 651 /* Unpin RX descriptor ring */ 652 efx_fini_special_buffer(efx, &rx_queue->rxd); 653 } 654 655 /* Free buffers backing RX queue */ 656 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) 657 { 658 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 659 } 660 661 /************************************************************************** 662 * 663 * Flush handling 664 * 665 **************************************************************************/ 666 667 /* efx_nic_flush_queues() must be woken up when all flushes are completed, 668 * or more RX flushes can be kicked off. 
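 *
 * Concretely (restating the condition below): wake when drain_pending
 * has reached zero, or when an RX flush slot is free (fewer than
 * EFX_RX_FLUSH_COUNT outstanding) and at least one RX flush is still
 * pending.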
669 */ 670 static bool efx_flush_wake(struct efx_nic *efx) 671 { 672 /* Ensure that all updates are visible to efx_nic_flush_queues() */ 673 smp_mb(); 674 675 return (atomic_read(&efx->drain_pending) == 0 || 676 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT 677 && atomic_read(&efx->rxq_flush_pending) > 0)); 678 } 679 680 static bool efx_check_tx_flush_complete(struct efx_nic *efx) 681 { 682 bool i = true; 683 efx_oword_t txd_ptr_tbl; 684 struct efx_channel *channel; 685 struct efx_tx_queue *tx_queue; 686 687 efx_for_each_channel(channel, efx) { 688 efx_for_each_channel_tx_queue(tx_queue, channel) { 689 efx_reado_table(efx, &txd_ptr_tbl, 690 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); 691 if (EFX_OWORD_FIELD(txd_ptr_tbl, 692 FRF_AZ_TX_DESCQ_FLUSH) || 693 EFX_OWORD_FIELD(txd_ptr_tbl, 694 FRF_AZ_TX_DESCQ_EN)) { 695 netif_dbg(efx, hw, efx->net_dev, 696 "flush did not complete on TXQ %d\n", 697 tx_queue->queue); 698 i = false; 699 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, 700 1, 0)) { 701 /* The flush is complete, but we didn't 702 * receive a flush completion event 703 */ 704 netif_dbg(efx, hw, efx->net_dev, 705 "flush complete on TXQ %d, so drain " 706 "the queue\n", tx_queue->queue); 707 /* Don't need to increment drain_pending as it 708 * has already been incremented for the queues 709 * which did not drain 710 */ 711 efx_magic_event(channel, 712 EFX_CHANNEL_MAGIC_TX_DRAIN( 713 tx_queue)); 714 } 715 } 716 } 717 718 return i; 719 } 720 721 /* Flush all the transmit queues, and continue flushing receive queues until 722 * they're all flushed. Wait for the DRAIN events to be received so that there 723 * are no more RX and TX events left on any channel. */ 724 int efx_nic_flush_queues(struct efx_nic *efx) 725 { 726 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ 727 struct efx_channel *channel; 728 struct efx_rx_queue *rx_queue; 729 struct efx_tx_queue *tx_queue; 730 int rc = 0; 731 732 efx->type->prepare_flush(efx); 733 734 efx_for_each_channel(channel, efx) { 735 efx_for_each_channel_tx_queue(tx_queue, channel) { 736 atomic_inc(&efx->drain_pending); 737 efx_flush_tx_queue(tx_queue); 738 } 739 efx_for_each_channel_rx_queue(rx_queue, channel) { 740 atomic_inc(&efx->drain_pending); 741 rx_queue->flush_pending = true; 742 atomic_inc(&efx->rxq_flush_pending); 743 } 744 } 745 746 while (timeout && atomic_read(&efx->drain_pending) > 0) { 747 /* If SRIOV is enabled, then offload receive queue flushing to 748 * the firmware (though we will still have to poll for 749 * completion). If that fails, fall back to the old scheme.
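 * "Fall back" simply means dropping through to the manual loop below,
 * which kicks at most EFX_RX_FLUSH_COUNT hardware flushes at a time
 * and retries as slots free up.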
750 */ 751 if (efx_sriov_enabled(efx)) { 752 rc = efx_mcdi_flush_rxqs(efx); 753 if (!rc) 754 goto wait; 755 } 756 757 /* The hardware supports four concurrent rx flushes, each of 758 * which may need to be retried if there is an outstanding 759 * descriptor fetch 760 */ 761 efx_for_each_channel(channel, efx) { 762 efx_for_each_channel_rx_queue(rx_queue, channel) { 763 if (atomic_read(&efx->rxq_flush_outstanding) >= 764 EFX_RX_FLUSH_COUNT) 765 break; 766 767 if (rx_queue->flush_pending) { 768 rx_queue->flush_pending = false; 769 atomic_dec(&efx->rxq_flush_pending); 770 atomic_inc(&efx->rxq_flush_outstanding); 771 efx_flush_rx_queue(rx_queue); 772 } 773 } 774 } 775 776 wait: 777 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx), 778 timeout); 779 } 780 781 if (atomic_read(&efx->drain_pending) && 782 !efx_check_tx_flush_complete(efx)) { 783 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 784 "(rx %d+%d)\n", atomic_read(&efx->drain_pending), 785 atomic_read(&efx->rxq_flush_outstanding), 786 atomic_read(&efx->rxq_flush_pending)); 787 rc = -ETIMEDOUT; 788 789 atomic_set(&efx->drain_pending, 0); 790 atomic_set(&efx->rxq_flush_pending, 0); 791 atomic_set(&efx->rxq_flush_outstanding, 0); 792 } 793 794 efx->type->finish_flush(efx); 795 796 return rc; 797 } 798 799 /************************************************************************** 800 * 801 * Event queue processing 802 * Event queues are processed by per-channel tasklets. 803 * 804 **************************************************************************/ 805 806 /* Update a channel's event queue's read pointer (RPTR) register 807 * 808 * This writes the EVQ_RPTR_REG register for the specified channel's 809 * event queue. 810 */ 811 void efx_nic_eventq_read_ack(struct efx_channel *channel) 812 { 813 efx_dword_t reg; 814 struct efx_nic *efx = channel->efx; 815 816 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 817 channel->eventq_read_ptr & channel->eventq_mask); 818 819 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size 820 * of 4 bytes, but it is really 16 bytes just like later revisions. 821 */ 822 efx_writed(efx, &reg, 823 efx->type->evq_rptr_tbl_base + 824 FR_BZ_EVQ_RPTR_STEP * channel->channel); 825 } 826 827 /* Use HW to insert a SW defined event */ 828 void efx_generate_event(struct efx_nic *efx, unsigned int evq, 829 efx_qword_t *event) 830 { 831 efx_oword_t drv_ev_reg; 832 833 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 834 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 835 drv_ev_reg.u32[0] = event->u32[0]; 836 drv_ev_reg.u32[1] = event->u32[1]; 837 drv_ev_reg.u32[2] = 0; 838 drv_ev_reg.u32[3] = 0; 839 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); 840 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); 841 } 842 843 static void efx_magic_event(struct efx_channel *channel, u32 magic) 844 { 845 efx_qword_t event; 846 847 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 848 FSE_AZ_EV_CODE_DRV_GEN_EV, 849 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 850 efx_generate_event(channel->efx, channel->channel, &event); 851 } 852 853 /* Handle a transmit completion event 854 * 855 * The NIC batches TX completion events; the message we receive is of 856 * the form "complete all TX events up to this index".
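 *
 * The completion count is computed modulo the ring size; with
 * illustrative numbers ptr_mask = 0x3ff, read_count = 1020 and an
 * event carrying desc_ptr = 2, (2 - 1020) & 0x3ff = 6, so the
 * subtraction wraps correctly past the end of the ring.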
857 */ 858 static int 859 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 860 { 861 unsigned int tx_ev_desc_ptr; 862 unsigned int tx_ev_q_label; 863 struct efx_tx_queue *tx_queue; 864 struct efx_nic *efx = channel->efx; 865 int tx_packets = 0; 866 867 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 868 return 0; 869 870 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 871 /* Transmit completion */ 872 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 873 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 874 tx_queue = efx_channel_get_tx_queue( 875 channel, tx_ev_q_label % EFX_TXQ_TYPES); 876 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 877 tx_queue->ptr_mask); 878 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 879 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 880 /* Rewrite the FIFO write pointer */ 881 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 882 tx_queue = efx_channel_get_tx_queue( 883 channel, tx_ev_q_label % EFX_TXQ_TYPES); 884 885 netif_tx_lock(efx->net_dev); 886 efx_notify_tx_desc(tx_queue); 887 netif_tx_unlock(efx->net_dev); 888 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 889 EFX_WORKAROUND_10727(efx)) { 890 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 891 } else { 892 netif_err(efx, tx_err, efx->net_dev, 893 "channel %d unexpected TX event " 894 EFX_QWORD_FMT"\n", channel->channel, 895 EFX_QWORD_VAL(*event)); 896 } 897 898 return tx_packets; 899 } 900 901 /* Detect errors included in the rx_ev_pkt_ok bit. */ 902 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 903 const efx_qword_t *event) 904 { 905 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 906 struct efx_nic *efx = rx_queue->efx; 907 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 908 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 909 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 910 bool rx_ev_other_err, rx_ev_pause_frm; 911 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 912 unsigned rx_ev_pkt_type; 913 914 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 915 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 916 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 917 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 918 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 919 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 920 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 921 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 922 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 923 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 924 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 925 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 926 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 927 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 928 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 929 930 /* Every error apart from tobe_disc and pause_frm */ 931 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 932 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 933 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 934 935 /* Count errors that are not in MAC stats. Ignore expected 936 * checksum errors during self-test.
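 * CRC errors are already visible in the MAC statistics, so they are
 * deliberately not counted again here; truncated and to-be-discarded
 * frames get their own channel counters, and checksum failures are
 * only counted outside loopback self-test, during which they are
 * expected.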
*/ 937 if (rx_ev_frm_trunc) 938 ++channel->n_rx_frm_trunc; 939 else if (rx_ev_tobe_disc) 940 ++channel->n_rx_tobe_disc; 941 else if (!efx->loopback_selftest) { 942 if (rx_ev_ip_hdr_chksum_err) 943 ++channel->n_rx_ip_hdr_chksum_err; 944 else if (rx_ev_tcp_udp_chksum_err) 945 ++channel->n_rx_tcp_udp_chksum_err; 946 } 947 948 /* TOBE_DISC is expected on unicast mismatches; don't print out an 949 * error message. FRM_TRUNC indicates RXDP dropped the packet due 950 * to a FIFO overflow. 951 */ 952 #ifdef DEBUG 953 if (rx_ev_other_err && net_ratelimit()) { 954 netif_dbg(efx, rx_err, efx->net_dev, 955 " RX queue %d unexpected RX event " 956 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 957 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 958 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 959 rx_ev_ip_hdr_chksum_err ? 960 " [IP_HDR_CHKSUM_ERR]" : "", 961 rx_ev_tcp_udp_chksum_err ? 962 " [TCP_UDP_CHKSUM_ERR]" : "", 963 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 964 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 965 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 966 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 967 rx_ev_pause_frm ? " [PAUSE]" : ""); 968 } 969 #endif 970 971 /* The frame must be discarded if any of these are true. */ 972 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 973 rx_ev_tobe_disc | rx_ev_pause_frm) ? 974 EFX_RX_PKT_DISCARD : 0; 975 } 976 977 /* Handle receive events that are not in-order. Return true if this 978 * can be handled as a partial packet discard, false if it's more 979 * serious. 980 */ 981 static bool 982 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 983 { 984 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 985 struct efx_nic *efx = rx_queue->efx; 986 unsigned expected, dropped; 987 988 if (rx_queue->scatter_n && 989 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & 990 rx_queue->ptr_mask)) { 991 ++channel->n_rx_nodesc_trunc; 992 return true; 993 } 994 995 expected = rx_queue->removed_count & rx_queue->ptr_mask; 996 dropped = (index - expected) & rx_queue->ptr_mask; 997 netif_info(efx, rx_err, efx->net_dev, 998 "dropped %d events (index=%d expected=%d)\n", 999 dropped, index, expected); 1000 1001 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 1002 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 1003 return false; 1004 } 1005 1006 /* Handle a packet received event 1007 * 1008 * The NIC gives a "discard" flag if it's a unicast packet with the 1009 * wrong destination address 1010 * Also "is multicast" and "matches multicast filter" flags can be used to 1011 * discard non-matching multicast packets. 
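 *
 * Scattered packets arrive as a run of events: the SOP event and each
 * continuation event add one descriptor to scatter_n, and the event
 * with JUMBO_CONT clear ends the run and carries the total byte count,
 * at which point efx_rx_packet() is called once for the whole run.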
1012 */ 1013 static void 1014 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 1015 { 1016 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 1017 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 1018 unsigned expected_ptr; 1019 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; 1020 u16 flags; 1021 struct efx_rx_queue *rx_queue; 1022 struct efx_nic *efx = channel->efx; 1023 1024 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 1025 return; 1026 1027 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); 1028 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); 1029 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 1030 channel->channel); 1031 1032 rx_queue = efx_channel_get_rx_queue(channel); 1033 1034 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 1035 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & 1036 rx_queue->ptr_mask); 1037 1038 /* Check for partial drops and other errors */ 1039 if (unlikely(rx_ev_desc_ptr != expected_ptr) || 1040 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { 1041 if (rx_ev_desc_ptr != expected_ptr && 1042 !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) 1043 return; 1044 1045 /* Discard all pending fragments */ 1046 if (rx_queue->scatter_n) { 1047 efx_rx_packet( 1048 rx_queue, 1049 rx_queue->removed_count & rx_queue->ptr_mask, 1050 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); 1051 rx_queue->removed_count += rx_queue->scatter_n; 1052 rx_queue->scatter_n = 0; 1053 } 1054 1055 /* Return if there is no new fragment */ 1056 if (rx_ev_desc_ptr != expected_ptr) 1057 return; 1058 1059 /* Discard new fragment if not SOP */ 1060 if (!rx_ev_sop) { 1061 efx_rx_packet( 1062 rx_queue, 1063 rx_queue->removed_count & rx_queue->ptr_mask, 1064 1, 0, EFX_RX_PKT_DISCARD); 1065 ++rx_queue->removed_count; 1066 return; 1067 } 1068 } 1069 1070 ++rx_queue->scatter_n; 1071 if (rx_ev_cont) 1072 return; 1073 1074 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 1075 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 1076 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 1077 1078 if (likely(rx_ev_pkt_ok)) { 1079 /* If packet is marked as OK then we can rely on the 1080 * hardware checksum and classification. 
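 *
 * The deliberate fall-through in the switch below yields the mapping:
 * TCP -> EFX_RX_PKT_TCP | EFX_RX_PKT_CSUMMED; UDP -> EFX_RX_PKT_CSUMMED;
 * other IPv4/v6 and non-IP -> no flags.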
1081 */ 1082 flags = 0; 1083 switch (rx_ev_hdr_type) { 1084 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP: 1085 flags |= EFX_RX_PKT_TCP; 1086 /* fall through */ 1087 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP: 1088 flags |= EFX_RX_PKT_CSUMMED; 1089 /* fall through */ 1090 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: 1091 case FSE_AZ_RX_EV_HDR_TYPE_OTHER: 1092 break; 1093 } 1094 } else { 1095 flags = efx_handle_rx_not_ok(rx_queue, event); 1096 } 1097 1098 /* Detect multicast packets that didn't match the filter */ 1099 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 1100 if (rx_ev_mcast_pkt) { 1101 unsigned int rx_ev_mcast_hash_match = 1102 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 1103 1104 if (unlikely(!rx_ev_mcast_hash_match)) { 1105 ++channel->n_rx_mcast_mismatch; 1106 flags |= EFX_RX_PKT_DISCARD; 1107 } 1108 } 1109 1110 channel->irq_mod_score += 2; 1111 1112 /* Handle received packet */ 1113 efx_rx_packet(rx_queue, 1114 rx_queue->removed_count & rx_queue->ptr_mask, 1115 rx_queue->scatter_n, rx_ev_byte_cnt, flags); 1116 rx_queue->removed_count += rx_queue->scatter_n; 1117 rx_queue->scatter_n = 0; 1118 } 1119 1120 /* If this flush done event corresponds to a &struct efx_tx_queue, then 1121 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 1122 * of all transmit completions. 1123 */ 1124 static void 1125 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1126 { 1127 struct efx_tx_queue *tx_queue; 1128 int qid; 1129 1130 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1131 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { 1132 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, 1133 qid % EFX_TXQ_TYPES); 1134 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { 1135 efx_magic_event(tx_queue->channel, 1136 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 1137 } 1138 } 1139 } 1140 1141 /* If this flush done event corresponds to a &struct efx_rx_queue: if the flush 1142 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add 1143 * the RX queue back to the mask of RX queues in need of flushing.
1144 */ 1145 static void 1146 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1147 { 1148 struct efx_channel *channel; 1149 struct efx_rx_queue *rx_queue; 1150 int qid; 1151 bool failed; 1152 1153 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1154 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1155 if (qid >= efx->n_channels) 1156 return; 1157 channel = efx_get_channel(efx, qid); 1158 if (!efx_channel_has_rx_queue(channel)) 1159 return; 1160 rx_queue = efx_channel_get_rx_queue(channel); 1161 1162 if (failed) { 1163 netif_info(efx, hw, efx->net_dev, 1164 "RXQ %d flush retry\n", qid); 1165 rx_queue->flush_pending = true; 1166 atomic_inc(&efx->rxq_flush_pending); 1167 } else { 1168 efx_magic_event(efx_rx_queue_channel(rx_queue), 1169 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 1170 } 1171 atomic_dec(&efx->rxq_flush_outstanding); 1172 if (efx_flush_wake(efx)) 1173 wake_up(&efx->flush_wq); 1174 } 1175 1176 static void 1177 efx_handle_drain_event(struct efx_channel *channel) 1178 { 1179 struct efx_nic *efx = channel->efx; 1180 1181 WARN_ON(atomic_read(&efx->drain_pending) == 0); 1182 atomic_dec(&efx->drain_pending); 1183 if (efx_flush_wake(efx)) 1184 wake_up(&efx->flush_wq); 1185 } 1186 1187 static void 1188 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1189 { 1190 struct efx_nic *efx = channel->efx; 1191 struct efx_rx_queue *rx_queue = 1192 efx_channel_has_rx_queue(channel) ? 1193 efx_channel_get_rx_queue(channel) : NULL; 1194 unsigned magic, code; 1195 1196 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1197 code = _EFX_CHANNEL_MAGIC_CODE(magic); 1198 1199 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 1200 channel->event_test_cpu = raw_smp_processor_id(); 1201 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 1202 /* The queue must be empty, so we won't receive any rx 1203 * events, so efx_process_channel() won't refill the 1204 * queue. 
Refill it here */ 1205 efx_fast_push_rx_descriptors(rx_queue); 1206 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 1207 rx_queue->enabled = false; 1208 efx_handle_drain_event(channel); 1209 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 1210 efx_handle_drain_event(channel); 1211 } else { 1212 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1213 "generated event "EFX_QWORD_FMT"\n", 1214 channel->channel, EFX_QWORD_VAL(*event)); 1215 } 1216 } 1217 1218 static void 1219 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1220 { 1221 struct efx_nic *efx = channel->efx; 1222 unsigned int ev_sub_code; 1223 unsigned int ev_sub_data; 1224 1225 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1226 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1227 1228 switch (ev_sub_code) { 1229 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1230 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1231 channel->channel, ev_sub_data); 1232 efx_handle_tx_flush_done(efx, event); 1233 efx_sriov_tx_flush_done(efx, event); 1234 break; 1235 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1236 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1237 channel->channel, ev_sub_data); 1238 efx_handle_rx_flush_done(efx, event); 1239 efx_sriov_rx_flush_done(efx, event); 1240 break; 1241 case FSE_AZ_EVQ_INIT_DONE_EV: 1242 netif_dbg(efx, hw, efx->net_dev, 1243 "channel %d EVQ %d initialised\n", 1244 channel->channel, ev_sub_data); 1245 break; 1246 case FSE_AZ_SRM_UPD_DONE_EV: 1247 netif_vdbg(efx, hw, efx->net_dev, 1248 "channel %d SRAM update done\n", channel->channel); 1249 break; 1250 case FSE_AZ_WAKE_UP_EV: 1251 netif_vdbg(efx, hw, efx->net_dev, 1252 "channel %d RXQ %d wakeup event\n", 1253 channel->channel, ev_sub_data); 1254 break; 1255 case FSE_AZ_TIMER_EV: 1256 netif_vdbg(efx, hw, efx->net_dev, 1257 "channel %d RX queue %d timer expired\n", 1258 channel->channel, ev_sub_data); 1259 break; 1260 case FSE_AA_RX_RECOVER_EV: 1261 netif_err(efx, rx_err, efx->net_dev, 1262 "channel %d seen DRIVER RX_RESET event. " 1263 "Resetting.\n", channel->channel); 1264 atomic_inc(&efx->rx_reset); 1265 efx_schedule_reset(efx, 1266 EFX_WORKAROUND_6555(efx) ? 1267 RESET_TYPE_RX_RECOVERY : 1268 RESET_TYPE_DISABLE); 1269 break; 1270 case FSE_BZ_RX_DSC_ERROR_EV: 1271 if (ev_sub_data < EFX_VI_BASE) { 1272 netif_err(efx, rx_err, efx->net_dev, 1273 "RX DMA Q %d reports descriptor fetch error." 1274 " RX Q %d is disabled.\n", ev_sub_data, 1275 ev_sub_data); 1276 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1277 } else 1278 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1279 break; 1280 case FSE_BZ_TX_DSC_ERROR_EV: 1281 if (ev_sub_data < EFX_VI_BASE) { 1282 netif_err(efx, tx_err, efx->net_dev, 1283 "TX DMA Q %d reports descriptor fetch error." 
1284 " TX Q %d is disabled.\n", ev_sub_data, 1285 ev_sub_data); 1286 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1287 } else 1288 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1289 break; 1290 default: 1291 netif_vdbg(efx, hw, efx->net_dev, 1292 "channel %d unknown driver event code %d " 1293 "data %04x\n", channel->channel, ev_sub_code, 1294 ev_sub_data); 1295 break; 1296 } 1297 } 1298 1299 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1300 { 1301 struct efx_nic *efx = channel->efx; 1302 unsigned int read_ptr; 1303 efx_qword_t event, *p_event; 1304 int ev_code; 1305 int tx_packets = 0; 1306 int spent = 0; 1307 1308 read_ptr = channel->eventq_read_ptr; 1309 1310 for (;;) { 1311 p_event = efx_event(channel, read_ptr); 1312 event = *p_event; 1313 1314 if (!efx_event_present(&event)) 1315 /* End of events */ 1316 break; 1317 1318 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1319 "channel %d event is "EFX_QWORD_FMT"\n", 1320 channel->channel, EFX_QWORD_VAL(event)); 1321 1322 /* Clear this event by marking it all ones */ 1323 EFX_SET_QWORD(*p_event); 1324 1325 ++read_ptr; 1326 1327 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1328 1329 switch (ev_code) { 1330 case FSE_AZ_EV_CODE_RX_EV: 1331 efx_handle_rx_event(channel, &event); 1332 if (++spent == budget) 1333 goto out; 1334 break; 1335 case FSE_AZ_EV_CODE_TX_EV: 1336 tx_packets += efx_handle_tx_event(channel, &event); 1337 if (tx_packets > efx->txq_entries) { 1338 spent = budget; 1339 goto out; 1340 } 1341 break; 1342 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1343 efx_handle_generated_event(channel, &event); 1344 break; 1345 case FSE_AZ_EV_CODE_DRIVER_EV: 1346 efx_handle_driver_event(channel, &event); 1347 break; 1348 case FSE_CZ_EV_CODE_USER_EV: 1349 efx_sriov_event(channel, &event); 1350 break; 1351 case FSE_CZ_EV_CODE_MCDI_EV: 1352 efx_mcdi_process_event(channel, &event); 1353 break; 1354 case FSE_AZ_EV_CODE_GLOBAL_EV: 1355 if (efx->type->handle_global_event && 1356 efx->type->handle_global_event(channel, &event)) 1357 break; 1358 /* else fall through */ 1359 default: 1360 netif_err(channel->efx, hw, channel->efx->net_dev, 1361 "channel %d unknown event type %d (data " 1362 EFX_QWORD_FMT ")\n", channel->channel, 1363 ev_code, EFX_QWORD_VAL(event)); 1364 } 1365 } 1366 1367 out: 1368 channel->eventq_read_ptr = read_ptr; 1369 return spent; 1370 } 1371 1372 /* Check whether an event is present in the eventq at the current 1373 * read pointer. Only useful for self-test. 
1374 */ 1375 bool efx_nic_event_present(struct efx_channel *channel) 1376 { 1377 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1378 } 1379 1380 /* Allocate buffer table entries for event queue */ 1381 int efx_nic_probe_eventq(struct efx_channel *channel) 1382 { 1383 struct efx_nic *efx = channel->efx; 1384 unsigned entries; 1385 1386 entries = channel->eventq_mask + 1; 1387 return efx_alloc_special_buffer(efx, &channel->eventq, 1388 entries * sizeof(efx_qword_t)); 1389 } 1390 1391 void efx_nic_init_eventq(struct efx_channel *channel) 1392 { 1393 efx_oword_t reg; 1394 struct efx_nic *efx = channel->efx; 1395 1396 netif_dbg(efx, hw, efx->net_dev, 1397 "channel %d event queue in special buffers %d-%d\n", 1398 channel->channel, channel->eventq.index, 1399 channel->eventq.index + channel->eventq.entries - 1); 1400 1401 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1402 EFX_POPULATE_OWORD_3(reg, 1403 FRF_CZ_TIMER_Q_EN, 1, 1404 FRF_CZ_HOST_NOTIFY_MODE, 0, 1405 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1406 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); 1407 } 1408 1409 /* Pin event queue buffer */ 1410 efx_init_special_buffer(efx, &channel->eventq); 1411 1412 /* Fill event queue with all ones (i.e. empty events) */ 1413 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len); 1414 1415 /* Push event queue to card */ 1416 EFX_POPULATE_OWORD_3(reg, 1417 FRF_AZ_EVQ_EN, 1, 1418 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1419 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1420 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, 1421 channel->channel); 1422 1423 efx->type->push_irq_moderation(channel); 1424 } 1425 1426 void efx_nic_fini_eventq(struct efx_channel *channel) 1427 { 1428 efx_oword_t reg; 1429 struct efx_nic *efx = channel->efx; 1430 1431 /* Remove event queue from card */ 1432 EFX_ZERO_OWORD(reg); 1433 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, 1434 channel->channel); 1435 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1436 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); 1437 1438 /* Unpin event queue */ 1439 efx_fini_special_buffer(efx, &channel->eventq); 1440 } 1441 1442 /* Free buffers backing event queue */ 1443 void efx_nic_remove_eventq(struct efx_channel *channel) 1444 { 1445 efx_free_special_buffer(channel->efx, &channel->eventq); 1446 } 1447 1448 1449 void efx_nic_event_test_start(struct efx_channel *channel) 1450 { 1451 channel->event_test_cpu = -1; 1452 smp_wmb(); 1453 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 1454 } 1455 1456 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) 1457 { 1458 efx_magic_event(efx_rx_queue_channel(rx_queue), 1459 EFX_CHANNEL_MAGIC_FILL(rx_queue)); 1460 } 1461 1462 /************************************************************************** 1463 * 1464 * Hardware interrupts 1465 * The hardware interrupt handler does very little work; all the event 1466 * queue processing is carried out by per-channel tasklets.
1467 * 1468 **************************************************************************/ 1469 1470 /* Enable/disable/generate interrupts */ 1471 static inline void efx_nic_interrupts(struct efx_nic *efx, 1472 bool enabled, bool force) 1473 { 1474 efx_oword_t int_en_reg_ker; 1475 1476 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1477 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1478 FRF_AZ_KER_INT_KER, force, 1479 FRF_AZ_DRV_INT_EN_KER, enabled); 1480 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1481 } 1482 1483 void efx_nic_enable_interrupts(struct efx_nic *efx) 1484 { 1485 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1486 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1487 1488 efx_nic_interrupts(efx, true, false); 1489 } 1490 1491 void efx_nic_disable_interrupts(struct efx_nic *efx) 1492 { 1493 /* Disable interrupts */ 1494 efx_nic_interrupts(efx, false, false); 1495 } 1496 1497 /* Generate a test interrupt 1498 * Interrupt must already have been enabled, otherwise nasty things 1499 * may happen. 1500 */ 1501 void efx_nic_irq_test_start(struct efx_nic *efx) 1502 { 1503 efx->last_irq_cpu = -1; 1504 smp_wmb(); 1505 efx_nic_interrupts(efx, true, true); 1506 } 1507 1508 /* Process a fatal interrupt 1509 * Disable bus mastering ASAP and schedule a reset 1510 */ 1511 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1512 { 1513 struct falcon_nic_data *nic_data = efx->nic_data; 1514 efx_oword_t *int_ker = efx->irq_status.addr; 1515 efx_oword_t fatal_intr; 1516 int error, mem_perr; 1517 1518 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1519 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1520 1521 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1522 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1523 EFX_OWORD_VAL(fatal_intr), 1524 error ? "disabling bus mastering" : "no recognised error"); 1525 1526 /* If this is a memory parity error, dump which blocks are offending */ 1527 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1528 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1529 if (mem_perr) { 1530 efx_oword_t reg; 1531 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1532 netif_err(efx, hw, efx->net_dev, 1533 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1534 EFX_OWORD_VAL(reg)); 1535 } 1536 1537 /* Disable both devices */ 1538 pci_clear_master(efx->pci_dev); 1539 if (efx_nic_is_dual_func(efx)) 1540 pci_clear_master(nic_data->pci_dev2); 1541 efx_nic_disable_interrupts(efx); 1542 1543 /* Count errors and reset or disable the NIC accordingly */ 1544 if (efx->int_error_count == 0 || 1545 time_after(jiffies, efx->int_error_expire)) { 1546 efx->int_error_count = 0; 1547 efx->int_error_expire = 1548 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1549 } 1550 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1551 netif_err(efx, hw, efx->net_dev, 1552 "SYSTEM ERROR - reset scheduled\n"); 1553 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1554 } else { 1555 netif_err(efx, hw, efx->net_dev, 1556 "SYSTEM ERROR - max number of errors seen.\n" 1557 "NIC will be disabled\n"); 1558 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1559 } 1560 1561 return IRQ_HANDLED; 1562 } 1563 1564 /* Handle a legacy interrupt 1565 * Acknowledges the interrupt and schedules event queue processing.
1566 */ 1567 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1568 { 1569 struct efx_nic *efx = dev_id; 1570 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); 1571 efx_oword_t *int_ker = efx->irq_status.addr; 1572 irqreturn_t result = IRQ_NONE; 1573 struct efx_channel *channel; 1574 efx_dword_t reg; 1575 u32 queues; 1576 int syserr; 1577 1578 /* Read the ISR which also ACKs the interrupts */ 1579 efx_readd(efx, &reg, FR_BZ_INT_ISR0); 1580 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1581 1582 /* Legacy interrupts are disabled too late by the EEH kernel 1583 * code. Disable them earlier. 1584 * If an EEH error occurred, the read will have returned all ones. 1585 */ 1586 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && 1587 !efx->eeh_disabled_legacy_irq) { 1588 disable_irq_nosync(efx->legacy_irq); 1589 efx->eeh_disabled_legacy_irq = true; 1590 } 1591 1592 /* Handle non-event-queue sources */ 1593 if (queues & (1U << efx->irq_level) && soft_enabled) { 1594 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1595 if (unlikely(syserr)) 1596 return efx_nic_fatal_interrupt(efx); 1597 efx->last_irq_cpu = raw_smp_processor_id(); 1598 } 1599 1600 if (queues != 0) { 1601 if (EFX_WORKAROUND_15783(efx)) 1602 efx->irq_zero_count = 0; 1603 1604 /* Schedule processing of any interrupting queues */ 1605 if (likely(soft_enabled)) { 1606 efx_for_each_channel(channel, efx) { 1607 if (queues & 1) 1608 efx_schedule_channel_irq(channel); 1609 queues >>= 1; 1610 } 1611 } 1612 result = IRQ_HANDLED; 1613 1614 } else if (EFX_WORKAROUND_15783(efx)) { 1615 efx_qword_t *event; 1616 1617 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1618 * because this might be a shared interrupt. */ 1619 if (efx->irq_zero_count++ == 0) 1620 result = IRQ_HANDLED; 1621 1622 /* Ensure we schedule or rearm all event queues */ 1623 if (likely(soft_enabled)) { 1624 efx_for_each_channel(channel, efx) { 1625 event = efx_event(channel, 1626 channel->eventq_read_ptr); 1627 if (efx_event_present(event)) 1628 efx_schedule_channel_irq(channel); 1629 else 1630 efx_nic_eventq_read_ack(channel); 1631 } 1632 } 1633 } 1634 1635 if (result == IRQ_HANDLED) 1636 netif_vdbg(efx, intr, efx->net_dev, 1637 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1638 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1639 1640 return result; 1641 } 1642 1643 /* Handle an MSI interrupt 1644 * 1645 * Handle an MSI hardware interrupt. This routine schedules event 1646 * queue processing. No interrupt acknowledgement cycle is necessary. 1647 * Also, we never need to check that the interrupt is for us, since 1648 * MSI interrupts cannot be shared.
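 *
 * dev_id is the &struct efx_msi_context registered for this vector, so
 * context->index names the channel to schedule directly; contrast the
 * legacy handler above, which must scan the ISR bitmap to find the
 * interrupting queues.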
1649 */ 1650 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1651 { 1652 struct efx_msi_context *context = dev_id; 1653 struct efx_nic *efx = context->efx; 1654 efx_oword_t *int_ker = efx->irq_status.addr; 1655 int syserr; 1656 1657 netif_vdbg(efx, intr, efx->net_dev, 1658 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1659 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1660 1661 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) 1662 return IRQ_HANDLED; 1663 1664 /* Handle non-event-queue sources */ 1665 if (context->index == efx->irq_level) { 1666 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1667 if (unlikely(syserr)) 1668 return efx_nic_fatal_interrupt(efx); 1669 efx->last_irq_cpu = raw_smp_processor_id(); 1670 } 1671 1672 /* Schedule processing of the channel */ 1673 efx_schedule_channel_irq(efx->channel[context->index]); 1674 1675 return IRQ_HANDLED; 1676 } 1677 1678 1679 /* Setup RSS indirection table. 1680 * This maps from the hash value of the packet to RXQ 1681 */ 1682 void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1683 { 1684 size_t i = 0; 1685 efx_dword_t dword; 1686 1687 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1688 return; 1689 1690 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1691 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1692 1693 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1694 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1695 efx->rx_indir_table[i]); 1696 efx_writed(efx, &dword, 1697 FR_BZ_RX_INDIRECTION_TBL + 1698 FR_BZ_RX_INDIRECTION_TBL_STEP * i); 1699 } 1700 } 1701 1702 /* Hook interrupt handler(s) 1703 * Try MSI and then legacy interrupts. 1704 */ 1705 int efx_nic_init_interrupt(struct efx_nic *efx) 1706 { 1707 struct efx_channel *channel; 1708 unsigned int n_irqs; 1709 int rc; 1710 1711 if (!EFX_INT_MODE_USE_MSI(efx)) { 1712 irq_handler_t handler; 1713 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1714 handler = efx_legacy_interrupt; 1715 else 1716 handler = falcon_legacy_interrupt_a1; 1717 1718 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1719 efx->name, efx); 1720 if (rc) { 1721 netif_err(efx, drv, efx->net_dev, 1722 "failed to hook legacy IRQ %d\n", 1723 efx->pci_dev->irq); 1724 goto fail1; 1725 } 1726 return 0; 1727 } 1728 1729 #ifdef CONFIG_RFS_ACCEL 1730 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 1731 efx->net_dev->rx_cpu_rmap = 1732 alloc_irq_cpu_rmap(efx->n_rx_channels); 1733 if (!efx->net_dev->rx_cpu_rmap) { 1734 rc = -ENOMEM; 1735 goto fail1; 1736 } 1737 } 1738 #endif 1739 1740 /* Hook MSI or MSI-X interrupt */ 1741 n_irqs = 0; 1742 efx_for_each_channel(channel, efx) { 1743 rc = request_irq(channel->irq, efx_msi_interrupt, 1744 IRQF_PROBE_SHARED, /* Not shared */ 1745 efx->msi_context[channel->channel].name, 1746 &efx->msi_context[channel->channel]); 1747 if (rc) { 1748 netif_err(efx, drv, efx->net_dev, 1749 "failed to hook IRQ %d\n", channel->irq); 1750 goto fail2; 1751 } 1752 ++n_irqs; 1753 1754 #ifdef CONFIG_RFS_ACCEL 1755 if (efx->interrupt_mode == EFX_INT_MODE_MSIX && 1756 channel->channel < efx->n_rx_channels) { 1757 rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, 1758 channel->irq); 1759 if (rc) 1760 goto fail2; 1761 } 1762 #endif 1763 } 1764 1765 return 0; 1766 1767 fail2: 1768 #ifdef CONFIG_RFS_ACCEL 1769 free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); 1770 efx->net_dev->rx_cpu_rmap = NULL; 1771 #endif 1772 efx_for_each_channel(channel, efx) { 1773 if (n_irqs-- == 0) 1774 break; 1775 free_irq(channel->irq, &efx->msi_context[channel->channel]); 1776 } 1777 fail1: 1778 return rc; 1779 } 1780 
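/* Illustrative sketch only, not part of the driver: one way a caller
 * could populate efx->rx_indir_table with a round-robin spread before
 * calling efx_nic_push_rx_indir_table() above. In the driver proper
 * the table is filled elsewhere (e.g. from ethtool configuration); the
 * helper name and the n_rx_queues parameter are assumptions made for
 * the example.
 */
static inline void
efx_example_fill_rx_indir(struct efx_nic *efx, unsigned int n_rx_queues)
{
	size_t i;

	/* Spread the hash buckets evenly across the RX queues;
	 * n_rx_queues must be at least 1.
	 */
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % n_rx_queues;
}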
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	/* Free MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->msi_context[channel->channel]);

	/* Free legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 *     0                          buftbl entries for channels
 *     efx->vf_buftbl_base        buftbl entries for SR-IOV
 *     efx->rx_dc_base            RX descriptor caches
 *     efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;

	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
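/* Editorial sketch (not part of the original driver): the tail of
 * efx_nic_dimension_resources() as a standalone calculation, to make the
 * SRAM carve-up explicit.  The helper name and parameter values are
 * hypothetical; the real function additionally reserves buffer-table
 * space for SR-IOV VIs.  With sram_lim_qw = 0x10000 and vi_count = 32
 * this yields tx_dc_base = 0xfe00 and rx_dc_base = 0xf600, i.e. TX
 * descriptor caches at the top of SRAM with RX caches directly below.
 */
static void __maybe_unused
example_place_dc(unsigned int sram_lim_qw, unsigned int vi_count,
		 unsigned int *tx_dc_base, unsigned int *rx_dc_base)
{
	/* TX descriptor caches occupy the top of SRAM */
	*tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	/* RX descriptor caches are packed immediately beneath them */
	*rx_dc_base = *tx_dc_base - vi_count * RX_DC_ENTRIES;
}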
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: the value is inverted before it is written, so all other
	 * fatal interrupt sources remain enabled as well.
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
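/* Editorial sketch (not part of the original driver): one plausible way to
 * populate efx->rx_indir_table before efx_nic_push_rx_indir_table() (called
 * from efx_nic_init_common() above) writes it to hardware.  Spreading the
 * hash buckets round-robin across the RX channels gives an even spread of
 * flows; the helper name is hypothetical.
 */
static void __maybe_unused example_fill_rx_indir_table(struct efx_nic *efx)
{
	size_t i;

	/* Map each of the FR_BZ_RX_INDIRECTION_TBL_ROWS hash buckets to
	 * one of the RX queues in turn.
	 */
	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
		efx->rx_indir_table[i] = i % efx->n_rx_channels;
}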
/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
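/* Editorial sketch (not part of the original driver): the revision filter
 * that efx_nic_get_regs_len() and efx_nic_get_regs() below apply to each
 * entry of the table above, pulled out as a hypothetical helper.
 */
static bool __maybe_unused
example_reg_is_present(const struct efx_nic *efx,
		       const struct efx_nic_reg *reg)
{
	/* An entry is dumped iff the running silicon revision falls in the
	 * inclusive [min_revision, max_revision] range that the
	 * REGISTER_xx() macro encoded for it.
	 */
	return efx->type->revision >= reg->min_revision &&
	       efx->type->revision <= reg->max_revision;
}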
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
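/* Editorial note (not part of the original driver): what the split
 * REGISTER_TABLE_BB_CZ(TIMER_TBL) entry used below expands to, assuming
 * the usual definitions in farch_regs.h.  The B and C revisions share an
 * offset and step but differ in row count, which is why two initialisers
 * are emitted:
 *
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 *	  FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 *	  FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
 */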
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However, this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
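/* Editorial sketch (not part of the original driver): how an ethtool
 * .get_regs method can sit on top of the two functions above.  The ethtool
 * core allocates 'buf' with the length reported by the matching
 * .get_regs_len() method, which would simply return
 * efx_nic_get_regs_len(efx).  The wrapper name is hypothetical.
 */
static void __maybe_unused example_ethtool_get_regs(struct net_device *net_dev,
						    struct ethtool_regs *regs,
						    void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;	/* identify the register layout */
	efx_nic_get_regs(efx, buf);		/* fill the preallocated buffer */
}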