1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include "net_driver.h" 18 #include "bitfield.h" 19 #include "efx.h" 20 #include "nic.h" 21 #include "regs.h" 22 #include "io.h" 23 #include "workarounds.h" 24 25 /************************************************************************** 26 * 27 * Configurable values 28 * 29 ************************************************************************** 30 */ 31 32 /* This is set to 16 for a good reason. In summary, if larger than 33 * 16, the descriptor cache holds more than a default socket 34 * buffer's worth of packets (for UDP we can only have at most one 35 * socket buffer's worth outstanding). This combined with the fact 36 * that we only get 1 TX event per descriptor cache means the NIC 37 * goes idle. 38 */ 39 #define TX_DC_ENTRIES 16 40 #define TX_DC_ENTRIES_ORDER 1 41 42 #define RX_DC_ENTRIES 64 43 #define RX_DC_ENTRIES_ORDER 3 44 45 /* If EFX_MAX_INT_ERRORS internal errors occur within 46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 47 * disable it. 48 */ 49 #define EFX_INT_ERROR_EXPIRE 3600 50 #define EFX_MAX_INT_ERRORS 5 51 52 /* Depth of RX flush request fifo */ 53 #define EFX_RX_FLUSH_COUNT 4 54 55 /* Driver generated events */ 56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101 57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102 58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 60 61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) 62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) 63 64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) 66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ 67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ 68 efx_rx_queue_index(_rx_queue)) 69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ 70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ 71 efx_rx_queue_index(_rx_queue)) 72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ 73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ 74 (_tx_queue)->queue) 75 76 static void efx_magic_event(struct efx_channel *channel, u32 magic); 77 78 /************************************************************************** 79 * 80 * Solarstorm hardware access 81 * 82 **************************************************************************/ 83 84 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 85 unsigned int index) 86 { 87 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 88 value, index); 89 } 90 91 /* Read the current event from the event queue */ 92 static inline efx_qword_t *efx_event(struct efx_channel *channel, 93 unsigned int index) 94 { 95 return ((efx_qword_t *) (channel->eventq.addr)) + 96 (index & channel->eventq_mask); 97 } 98 99 /* See if an event is present 100 * 101 * We check both the high and low dword of the event for all ones. 
We 102 * wrote all ones when we cleared the event, and no valid event can 103 * have all ones in either its high or low dwords. This approach is 104 * robust against reordering. 105 * 106 * Note that using a single 64-bit comparison is incorrect; even 107 * though the CPU read will be atomic, the DMA write may not be. 108 */ 109 static inline int efx_event_present(efx_qword_t *event) 110 { 111 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | 112 EFX_DWORD_IS_ALL_ONES(event->dword[1])); 113 } 114 115 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 116 const efx_oword_t *mask) 117 { 118 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || 119 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); 120 } 121 122 int efx_nic_test_registers(struct efx_nic *efx, 123 const struct efx_nic_register_test *regs, 124 size_t n_regs) 125 { 126 unsigned address = 0, i, j; 127 efx_oword_t mask, imask, original, reg, buf; 128 129 for (i = 0; i < n_regs; ++i) { 130 address = regs[i].address; 131 mask = imask = regs[i].mask; 132 EFX_INVERT_OWORD(imask); 133 134 efx_reado(efx, &original, address); 135 136 /* bit sweep on and off */ 137 for (j = 0; j < 128; j++) { 138 if (!EFX_EXTRACT_OWORD32(mask, j, j)) 139 continue; 140 141 /* Test this testable bit can be set in isolation */ 142 EFX_AND_OWORD(reg, original, mask); 143 EFX_SET_OWORD32(reg, j, j, 1); 144 145 efx_writeo(efx, ®, address); 146 efx_reado(efx, &buf, address); 147 148 if (efx_masked_compare_oword(®, &buf, &mask)) 149 goto fail; 150 151 /* Test this testable bit can be cleared in isolation */ 152 EFX_OR_OWORD(reg, original, mask); 153 EFX_SET_OWORD32(reg, j, j, 0); 154 155 efx_writeo(efx, ®, address); 156 efx_reado(efx, &buf, address); 157 158 if (efx_masked_compare_oword(®, &buf, &mask)) 159 goto fail; 160 } 161 162 efx_writeo(efx, &original, address); 163 } 164 165 return 0; 166 167 fail: 168 netif_err(efx, hw, efx->net_dev, 169 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT 170 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), 171 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); 172 return -EIO; 173 } 174 175 /************************************************************************** 176 * 177 * Special buffer handling 178 * Special buffers are used for event queues and the TX and RX 179 * descriptor rings. 180 * 181 *************************************************************************/ 182 183 /* 184 * Initialise a special buffer 185 * 186 * This will define a buffer (previously allocated via 187 * efx_alloc_special_buffer()) in the buffer table, allowing 188 * it to be used for event queues, descriptor rings etc. 
189 */ 190 static void 191 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 192 { 193 efx_qword_t buf_desc; 194 unsigned int index; 195 dma_addr_t dma_addr; 196 int i; 197 198 EFX_BUG_ON_PARANOID(!buffer->addr); 199 200 /* Write buffer descriptors to NIC */ 201 for (i = 0; i < buffer->entries; i++) { 202 index = buffer->index + i; 203 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE); 204 netif_dbg(efx, probe, efx->net_dev, 205 "mapping special buffer %d at %llx\n", 206 index, (unsigned long long)dma_addr); 207 EFX_POPULATE_QWORD_3(buf_desc, 208 FRF_AZ_BUF_ADR_REGION, 0, 209 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 210 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 211 efx_write_buf_tbl(efx, &buf_desc, index); 212 } 213 } 214 215 /* Unmaps a buffer and clears the buffer table entries */ 216 static void 217 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 218 { 219 efx_oword_t buf_tbl_upd; 220 unsigned int start = buffer->index; 221 unsigned int end = (buffer->index + buffer->entries - 1); 222 223 if (!buffer->entries) 224 return; 225 226 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 227 buffer->index, buffer->index + buffer->entries - 1); 228 229 EFX_POPULATE_OWORD_4(buf_tbl_upd, 230 FRF_AZ_BUF_UPD_CMD, 0, 231 FRF_AZ_BUF_CLR_CMD, 1, 232 FRF_AZ_BUF_CLR_END_ID, end, 233 FRF_AZ_BUF_CLR_START_ID, start); 234 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 235 } 236 237 /* 238 * Allocate a new special buffer 239 * 240 * This allocates memory for a new buffer, clears it and allocates a 241 * new buffer ID range. It does not write into the buffer table. 242 * 243 * This call will allocate 4KB buffers, since 8KB buffers can't be 244 * used for event queues and descriptor rings. 245 */ 246 static int efx_alloc_special_buffer(struct efx_nic *efx, 247 struct efx_special_buffer *buffer, 248 unsigned int len) 249 { 250 len = ALIGN(len, EFX_BUF_SIZE); 251 252 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 253 &buffer->dma_addr, GFP_KERNEL); 254 if (!buffer->addr) 255 return -ENOMEM; 256 buffer->len = len; 257 buffer->entries = len / EFX_BUF_SIZE; 258 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 259 260 /* Select new buffer ID */ 261 buffer->index = efx->next_buffer_table; 262 efx->next_buffer_table += buffer->entries; 263 #ifdef CONFIG_SFC_SRIOV 264 BUG_ON(efx_sriov_enabled(efx) && 265 efx->vf_buftbl_base < efx->next_buffer_table); 266 #endif 267 268 netif_dbg(efx, probe, efx->net_dev, 269 "allocating special buffers %d-%d at %llx+%x " 270 "(virt %p phys %llx)\n", buffer->index, 271 buffer->index + buffer->entries - 1, 272 (u64)buffer->dma_addr, len, 273 buffer->addr, (u64)virt_to_phys(buffer->addr)); 274 275 return 0; 276 } 277 278 static void 279 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 280 { 281 if (!buffer->addr) 282 return; 283 284 netif_dbg(efx, hw, efx->net_dev, 285 "deallocating special buffers %d-%d at %llx+%x " 286 "(virt %p phys %llx)\n", buffer->index, 287 buffer->index + buffer->entries - 1, 288 (u64)buffer->dma_addr, buffer->len, 289 buffer->addr, (u64)virt_to_phys(buffer->addr)); 290 291 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, 292 buffer->dma_addr); 293 buffer->addr = NULL; 294 buffer->entries = 0; 295 } 296 297 /************************************************************************** 298 * 299 * Generic buffer handling 300 * These buffers are used for interrupt status, MAC stats, etc. 
301 * 302 **************************************************************************/ 303 304 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 305 unsigned int len) 306 { 307 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 308 &buffer->dma_addr, 309 GFP_ATOMIC | __GFP_ZERO); 310 if (!buffer->addr) 311 return -ENOMEM; 312 buffer->len = len; 313 return 0; 314 } 315 316 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) 317 { 318 if (buffer->addr) { 319 dma_free_coherent(&efx->pci_dev->dev, buffer->len, 320 buffer->addr, buffer->dma_addr); 321 buffer->addr = NULL; 322 } 323 } 324 325 /************************************************************************** 326 * 327 * TX path 328 * 329 **************************************************************************/ 330 331 /* Returns a pointer to the specified transmit descriptor in the TX 332 * descriptor queue belonging to the specified channel. 333 */ 334 static inline efx_qword_t * 335 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 336 { 337 return ((efx_qword_t *) (tx_queue->txd.addr)) + index; 338 } 339 340 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 341 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) 342 { 343 unsigned write_ptr; 344 efx_dword_t reg; 345 346 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 347 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 348 efx_writed_page(tx_queue->efx, ®, 349 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 350 } 351 352 /* Write pointer and first descriptor for TX descriptor ring */ 353 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, 354 const efx_qword_t *txd) 355 { 356 unsigned write_ptr; 357 efx_oword_t reg; 358 359 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); 360 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); 361 362 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 363 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, 364 FRF_AZ_TX_DESC_WPTR, write_ptr); 365 reg.qword[0] = *txd; 366 efx_writeo_page(tx_queue->efx, ®, 367 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); 368 } 369 370 static inline bool 371 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) 372 { 373 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); 374 375 if (empty_read_count == 0) 376 return false; 377 378 tx_queue->empty_read_count = 0; 379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 380 && tx_queue->write_count - write_count == 1; 381 } 382 383 /* For each entry inserted into the software descriptor ring, create a 384 * descriptor in the hardware TX descriptor ring (in host memory), and 385 * write a doorbell. 
386 */ 387 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) 388 { 389 390 struct efx_tx_buffer *buffer; 391 efx_qword_t *txd; 392 unsigned write_ptr; 393 unsigned old_write_count = tx_queue->write_count; 394 395 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 396 397 do { 398 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 399 buffer = &tx_queue->buffer[write_ptr]; 400 txd = efx_tx_desc(tx_queue, write_ptr); 401 ++tx_queue->write_count; 402 403 /* Create TX descriptor ring entry */ 404 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); 405 EFX_POPULATE_QWORD_4(*txd, 406 FSF_AZ_TX_KER_CONT, 407 buffer->flags & EFX_TX_BUF_CONT, 408 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 409 FSF_AZ_TX_KER_BUF_REGION, 0, 410 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 411 } while (tx_queue->write_count != tx_queue->insert_count); 412 413 wmb(); /* Ensure descriptors are written before they are fetched */ 414 415 if (efx_may_push_tx_desc(tx_queue, old_write_count)) { 416 txd = efx_tx_desc(tx_queue, 417 old_write_count & tx_queue->ptr_mask); 418 efx_push_tx_desc(tx_queue, txd); 419 ++tx_queue->pushes; 420 } else { 421 efx_notify_tx_desc(tx_queue); 422 } 423 } 424 425 /* Allocate hardware resources for a TX queue */ 426 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 427 { 428 struct efx_nic *efx = tx_queue->efx; 429 unsigned entries; 430 431 entries = tx_queue->ptr_mask + 1; 432 return efx_alloc_special_buffer(efx, &tx_queue->txd, 433 entries * sizeof(efx_qword_t)); 434 } 435 436 void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 437 { 438 struct efx_nic *efx = tx_queue->efx; 439 efx_oword_t reg; 440 441 /* Pin TX descriptor ring */ 442 efx_init_special_buffer(efx, &tx_queue->txd); 443 444 /* Push TX descriptor ring to card */ 445 EFX_POPULATE_OWORD_10(reg, 446 FRF_AZ_TX_DESCQ_EN, 1, 447 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 448 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 449 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 450 FRF_AZ_TX_DESCQ_EVQ_ID, 451 tx_queue->channel->channel, 452 FRF_AZ_TX_DESCQ_OWNER_ID, 0, 453 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, 454 FRF_AZ_TX_DESCQ_SIZE, 455 __ffs(tx_queue->txd.entries), 456 FRF_AZ_TX_DESCQ_TYPE, 0, 457 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 458 459 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 460 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 461 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 462 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, 463 !csum); 464 } 465 466 efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, 467 tx_queue->queue); 468 469 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 470 /* Only 128 bits in this register */ 471 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 472 473 efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); 474 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 475 __clear_bit_le(tx_queue->queue, ®); 476 else 477 __set_bit_le(tx_queue->queue, ®); 478 efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); 479 } 480 481 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 482 EFX_POPULATE_OWORD_1(reg, 483 FRF_BZ_TX_PACE, 484 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
485 FFE_BZ_TX_PACE_OFF : 486 FFE_BZ_TX_PACE_RESERVED); 487 efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, 488 tx_queue->queue); 489 } 490 } 491 492 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 493 { 494 struct efx_nic *efx = tx_queue->efx; 495 efx_oword_t tx_flush_descq; 496 497 WARN_ON(atomic_read(&tx_queue->flush_outstanding)); 498 atomic_set(&tx_queue->flush_outstanding, 1); 499 500 EFX_POPULATE_OWORD_2(tx_flush_descq, 501 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 502 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 503 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 504 } 505 506 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) 507 { 508 struct efx_nic *efx = tx_queue->efx; 509 efx_oword_t tx_desc_ptr; 510 511 /* Remove TX descriptor ring from card */ 512 EFX_ZERO_OWORD(tx_desc_ptr); 513 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 514 tx_queue->queue); 515 516 /* Unpin TX descriptor ring */ 517 efx_fini_special_buffer(efx, &tx_queue->txd); 518 } 519 520 /* Free buffers backing TX queue */ 521 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) 522 { 523 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 524 } 525 526 /************************************************************************** 527 * 528 * RX path 529 * 530 **************************************************************************/ 531 532 /* Returns a pointer to the specified descriptor in the RX descriptor queue */ 533 static inline efx_qword_t * 534 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 535 { 536 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; 537 } 538 539 /* This creates an entry in the RX descriptor queue */ 540 static inline void 541 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 542 { 543 struct efx_rx_buffer *rx_buf; 544 efx_qword_t *rxd; 545 546 rxd = efx_rx_desc(rx_queue, index); 547 rx_buf = efx_rx_buffer(rx_queue, index); 548 EFX_POPULATE_QWORD_3(*rxd, 549 FSF_AZ_RX_KER_BUF_SIZE, 550 rx_buf->len - 551 rx_queue->efx->type->rx_buffer_padding, 552 FSF_AZ_RX_KER_BUF_REGION, 0, 553 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 554 } 555 556 /* This writes to the RX_DESC_WPTR register for the specified receive 557 * descriptor ring. 558 */ 559 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 560 { 561 struct efx_nic *efx = rx_queue->efx; 562 efx_dword_t reg; 563 unsigned write_ptr; 564 565 while (rx_queue->notified_count != rx_queue->added_count) { 566 efx_build_rx_desc( 567 rx_queue, 568 rx_queue->notified_count & rx_queue->ptr_mask); 569 ++rx_queue->notified_count; 570 } 571 572 wmb(); 573 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 574 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 575 efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, 576 efx_rx_queue_index(rx_queue)); 577 } 578 579 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 580 { 581 struct efx_nic *efx = rx_queue->efx; 582 unsigned entries; 583 584 entries = rx_queue->ptr_mask + 1; 585 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 586 entries * sizeof(efx_qword_t)); 587 } 588 589 void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 590 { 591 efx_oword_t rx_desc_ptr; 592 struct efx_nic *efx = rx_queue->efx; 593 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 594 bool iscsi_digest_en = is_b0; 595 bool jumbo_en; 596 597 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables 598 * DMA to continue after a PCIe page boundary (and scattering 599 * is not possible). In Falcon B0 and Siena, it enables 600 * scatter. 
601 */ 602 jumbo_en = !is_b0 || efx->rx_scatter; 603 604 netif_dbg(efx, hw, efx->net_dev, 605 "RX queue %d ring in special buffers %d-%d\n", 606 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 607 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 608 609 rx_queue->scatter_n = 0; 610 611 /* Pin RX descriptor ring */ 612 efx_init_special_buffer(efx, &rx_queue->rxd); 613 614 /* Push RX descriptor ring to card */ 615 EFX_POPULATE_OWORD_10(rx_desc_ptr, 616 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, 617 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 618 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 619 FRF_AZ_RX_DESCQ_EVQ_ID, 620 efx_rx_queue_channel(rx_queue)->channel, 621 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 622 FRF_AZ_RX_DESCQ_LABEL, 623 efx_rx_queue_index(rx_queue), 624 FRF_AZ_RX_DESCQ_SIZE, 625 __ffs(rx_queue->rxd.entries), 626 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 627 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en, 628 FRF_AZ_RX_DESCQ_EN, 1); 629 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 630 efx_rx_queue_index(rx_queue)); 631 } 632 633 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 634 { 635 struct efx_nic *efx = rx_queue->efx; 636 efx_oword_t rx_flush_descq; 637 638 EFX_POPULATE_OWORD_2(rx_flush_descq, 639 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 640 FRF_AZ_RX_FLUSH_DESCQ, 641 efx_rx_queue_index(rx_queue)); 642 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 643 } 644 645 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) 646 { 647 efx_oword_t rx_desc_ptr; 648 struct efx_nic *efx = rx_queue->efx; 649 650 /* Remove RX descriptor ring from card */ 651 EFX_ZERO_OWORD(rx_desc_ptr); 652 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 653 efx_rx_queue_index(rx_queue)); 654 655 /* Unpin RX descriptor ring */ 656 efx_fini_special_buffer(efx, &rx_queue->rxd); 657 } 658 659 /* Free buffers backing RX queue */ 660 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) 661 { 662 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 663 } 664 665 /************************************************************************** 666 * 667 * Flush handling 668 * 669 **************************************************************************/ 670 671 /* efx_nic_flush_queues() must be woken up when all flushes are completed, 672 * or more RX flushes can be kicked off. 
673 */ 674 static bool efx_flush_wake(struct efx_nic *efx) 675 { 676 /* Ensure that all updates are visible to efx_nic_flush_queues() */ 677 smp_mb(); 678 679 return (atomic_read(&efx->drain_pending) == 0 || 680 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT 681 && atomic_read(&efx->rxq_flush_pending) > 0)); 682 } 683 684 static bool efx_check_tx_flush_complete(struct efx_nic *efx) 685 { 686 bool i = true; 687 efx_oword_t txd_ptr_tbl; 688 struct efx_channel *channel; 689 struct efx_tx_queue *tx_queue; 690 691 efx_for_each_channel(channel, efx) { 692 efx_for_each_channel_tx_queue(tx_queue, channel) { 693 efx_reado_table(efx, &txd_ptr_tbl, 694 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue); 695 if (EFX_OWORD_FIELD(txd_ptr_tbl, 696 FRF_AZ_TX_DESCQ_FLUSH) || 697 EFX_OWORD_FIELD(txd_ptr_tbl, 698 FRF_AZ_TX_DESCQ_EN)) { 699 netif_dbg(efx, hw, efx->net_dev, 700 "flush did not complete on TXQ %d\n", 701 tx_queue->queue); 702 i = false; 703 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding, 704 1, 0)) { 705 /* The flush is complete, but we didn't 706 * receive a flush completion event 707 */ 708 netif_dbg(efx, hw, efx->net_dev, 709 "flush complete on TXQ %d, so drain " 710 "the queue\n", tx_queue->queue); 711 /* Don't need to increment drain_pending as it 712 * has already been incremented for the queues 713 * which did not drain 714 */ 715 efx_magic_event(channel, 716 EFX_CHANNEL_MAGIC_TX_DRAIN( 717 tx_queue)); 718 } 719 } 720 } 721 722 return i; 723 } 724 725 /* Flush all the transmit queues, and continue flushing receive queues until 726 * they're all flushed. Wait for the DRAIN events to be recieved so that there 727 * are no more RX and TX events left on any channel. */ 728 int efx_nic_flush_queues(struct efx_nic *efx) 729 { 730 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ 731 struct efx_channel *channel; 732 struct efx_rx_queue *rx_queue; 733 struct efx_tx_queue *tx_queue; 734 int rc = 0; 735 736 efx->type->prepare_flush(efx); 737 738 efx_for_each_channel(channel, efx) { 739 efx_for_each_channel_tx_queue(tx_queue, channel) { 740 atomic_inc(&efx->drain_pending); 741 efx_flush_tx_queue(tx_queue); 742 } 743 efx_for_each_channel_rx_queue(rx_queue, channel) { 744 atomic_inc(&efx->drain_pending); 745 rx_queue->flush_pending = true; 746 atomic_inc(&efx->rxq_flush_pending); 747 } 748 } 749 750 while (timeout && atomic_read(&efx->drain_pending) > 0) { 751 /* If SRIOV is enabled, then offload receive queue flushing to 752 * the firmware (though we will still have to poll for 753 * completion). If that fails, fall back to the old scheme. 
754 */ 755 if (efx_sriov_enabled(efx)) { 756 rc = efx_mcdi_flush_rxqs(efx); 757 if (!rc) 758 goto wait; 759 } 760 761 /* The hardware supports four concurrent rx flushes, each of 762 * which may need to be retried if there is an outstanding 763 * descriptor fetch 764 */ 765 efx_for_each_channel(channel, efx) { 766 efx_for_each_channel_rx_queue(rx_queue, channel) { 767 if (atomic_read(&efx->rxq_flush_outstanding) >= 768 EFX_RX_FLUSH_COUNT) 769 break; 770 771 if (rx_queue->flush_pending) { 772 rx_queue->flush_pending = false; 773 atomic_dec(&efx->rxq_flush_pending); 774 atomic_inc(&efx->rxq_flush_outstanding); 775 efx_flush_rx_queue(rx_queue); 776 } 777 } 778 } 779 780 wait: 781 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx), 782 timeout); 783 } 784 785 if (atomic_read(&efx->drain_pending) && 786 !efx_check_tx_flush_complete(efx)) { 787 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 788 "(rx %d+%d)\n", atomic_read(&efx->drain_pending), 789 atomic_read(&efx->rxq_flush_outstanding), 790 atomic_read(&efx->rxq_flush_pending)); 791 rc = -ETIMEDOUT; 792 793 atomic_set(&efx->drain_pending, 0); 794 atomic_set(&efx->rxq_flush_pending, 0); 795 atomic_set(&efx->rxq_flush_outstanding, 0); 796 } 797 798 efx->type->finish_flush(efx); 799 800 return rc; 801 } 802 803 /************************************************************************** 804 * 805 * Event queue processing 806 * Event queues are processed by per-channel tasklets. 807 * 808 **************************************************************************/ 809 810 /* Update a channel's event queue's read pointer (RPTR) register 811 * 812 * This writes the EVQ_RPTR_REG register for the specified channel's 813 * event queue. 814 */ 815 void efx_nic_eventq_read_ack(struct efx_channel *channel) 816 { 817 efx_dword_t reg; 818 struct efx_nic *efx = channel->efx; 819 820 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 821 channel->eventq_read_ptr & channel->eventq_mask); 822 823 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size 824 * of 4 bytes, but it is really 16 bytes just like later revisions. 825 */ 826 efx_writed(efx, ®, 827 efx->type->evq_rptr_tbl_base + 828 FR_BZ_EVQ_RPTR_STEP * channel->channel); 829 } 830 831 /* Use HW to insert a SW defined event */ 832 void efx_generate_event(struct efx_nic *efx, unsigned int evq, 833 efx_qword_t *event) 834 { 835 efx_oword_t drv_ev_reg; 836 837 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 838 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 839 drv_ev_reg.u32[0] = event->u32[0]; 840 drv_ev_reg.u32[1] = event->u32[1]; 841 drv_ev_reg.u32[2] = 0; 842 drv_ev_reg.u32[3] = 0; 843 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); 844 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); 845 } 846 847 static void efx_magic_event(struct efx_channel *channel, u32 magic) 848 { 849 efx_qword_t event; 850 851 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 852 FSE_AZ_EV_CODE_DRV_GEN_EV, 853 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 854 efx_generate_event(channel->efx, channel->channel, &event); 855 } 856 857 /* Handle a transmit completion event 858 * 859 * The NIC batches TX completion events; the message we receive is of 860 * the form "complete all TX events up to this index". 
861 */ 862 static int 863 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 864 { 865 unsigned int tx_ev_desc_ptr; 866 unsigned int tx_ev_q_label; 867 struct efx_tx_queue *tx_queue; 868 struct efx_nic *efx = channel->efx; 869 int tx_packets = 0; 870 871 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 872 return 0; 873 874 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 875 /* Transmit completion */ 876 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 877 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 878 tx_queue = efx_channel_get_tx_queue( 879 channel, tx_ev_q_label % EFX_TXQ_TYPES); 880 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 881 tx_queue->ptr_mask); 882 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 883 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 884 /* Rewrite the FIFO write pointer */ 885 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 886 tx_queue = efx_channel_get_tx_queue( 887 channel, tx_ev_q_label % EFX_TXQ_TYPES); 888 889 netif_tx_lock(efx->net_dev); 890 efx_notify_tx_desc(tx_queue); 891 netif_tx_unlock(efx->net_dev); 892 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 893 EFX_WORKAROUND_10727(efx)) { 894 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 895 } else { 896 netif_err(efx, tx_err, efx->net_dev, 897 "channel %d unexpected TX event " 898 EFX_QWORD_FMT"\n", channel->channel, 899 EFX_QWORD_VAL(*event)); 900 } 901 902 return tx_packets; 903 } 904 905 /* Detect errors included in the rx_evt_pkt_ok bit. */ 906 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 907 const efx_qword_t *event) 908 { 909 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 910 struct efx_nic *efx = rx_queue->efx; 911 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 912 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 913 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 914 bool rx_ev_other_err, rx_ev_pause_frm; 915 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 916 unsigned rx_ev_pkt_type; 917 918 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 919 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 920 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 921 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 922 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 923 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 924 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 925 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 926 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 927 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 928 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 929 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 930 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 931 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 932 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 933 934 /* Every error apart from tobe_disc and pause_frm */ 935 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 936 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 937 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 938 939 /* Count errors that are not in MAC stats. Ignore expected 940 * checksum errors during self-test. 
*/ 941 if (rx_ev_frm_trunc) 942 ++channel->n_rx_frm_trunc; 943 else if (rx_ev_tobe_disc) 944 ++channel->n_rx_tobe_disc; 945 else if (!efx->loopback_selftest) { 946 if (rx_ev_ip_hdr_chksum_err) 947 ++channel->n_rx_ip_hdr_chksum_err; 948 else if (rx_ev_tcp_udp_chksum_err) 949 ++channel->n_rx_tcp_udp_chksum_err; 950 } 951 952 /* TOBE_DISC is expected on unicast mismatches; don't print out an 953 * error message. FRM_TRUNC indicates RXDP dropped the packet due 954 * to a FIFO overflow. 955 */ 956 #ifdef DEBUG 957 if (rx_ev_other_err && net_ratelimit()) { 958 netif_dbg(efx, rx_err, efx->net_dev, 959 " RX queue %d unexpected RX event " 960 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 961 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 962 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 963 rx_ev_ip_hdr_chksum_err ? 964 " [IP_HDR_CHKSUM_ERR]" : "", 965 rx_ev_tcp_udp_chksum_err ? 966 " [TCP_UDP_CHKSUM_ERR]" : "", 967 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 968 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 969 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 970 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 971 rx_ev_pause_frm ? " [PAUSE]" : ""); 972 } 973 #endif 974 975 /* The frame must be discarded if any of these are true. */ 976 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 977 rx_ev_tobe_disc | rx_ev_pause_frm) ? 978 EFX_RX_PKT_DISCARD : 0; 979 } 980 981 /* Handle receive events that are not in-order. Return true if this 982 * can be handled as a partial packet discard, false if it's more 983 * serious. 984 */ 985 static bool 986 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 987 { 988 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 989 struct efx_nic *efx = rx_queue->efx; 990 unsigned expected, dropped; 991 992 if (rx_queue->scatter_n && 993 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) & 994 rx_queue->ptr_mask)) { 995 ++channel->n_rx_nodesc_trunc; 996 return true; 997 } 998 999 expected = rx_queue->removed_count & rx_queue->ptr_mask; 1000 dropped = (index - expected) & rx_queue->ptr_mask; 1001 netif_info(efx, rx_err, efx->net_dev, 1002 "dropped %d events (index=%d expected=%d)\n", 1003 dropped, index, expected); 1004 1005 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 1006 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 1007 return false; 1008 } 1009 1010 /* Handle a packet received event 1011 * 1012 * The NIC gives a "discard" flag if it's a unicast packet with the 1013 * wrong destination address 1014 * Also "is multicast" and "matches multicast filter" flags can be used to 1015 * discard non-matching multicast packets. 
1016 */ 1017 static void 1018 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 1019 { 1020 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 1021 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 1022 unsigned expected_ptr; 1023 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont; 1024 u16 flags; 1025 struct efx_rx_queue *rx_queue; 1026 struct efx_nic *efx = channel->efx; 1027 1028 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 1029 return; 1030 1031 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); 1032 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP); 1033 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 1034 channel->channel); 1035 1036 rx_queue = efx_channel_get_rx_queue(channel); 1037 1038 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 1039 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) & 1040 rx_queue->ptr_mask); 1041 1042 /* Check for partial drops and other errors */ 1043 if (unlikely(rx_ev_desc_ptr != expected_ptr) || 1044 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) { 1045 if (rx_ev_desc_ptr != expected_ptr && 1046 !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr)) 1047 return; 1048 1049 /* Discard all pending fragments */ 1050 if (rx_queue->scatter_n) { 1051 efx_rx_packet( 1052 rx_queue, 1053 rx_queue->removed_count & rx_queue->ptr_mask, 1054 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD); 1055 rx_queue->removed_count += rx_queue->scatter_n; 1056 rx_queue->scatter_n = 0; 1057 } 1058 1059 /* Return if there is no new fragment */ 1060 if (rx_ev_desc_ptr != expected_ptr) 1061 return; 1062 1063 /* Discard new fragment if not SOP */ 1064 if (!rx_ev_sop) { 1065 efx_rx_packet( 1066 rx_queue, 1067 rx_queue->removed_count & rx_queue->ptr_mask, 1068 1, 0, EFX_RX_PKT_DISCARD); 1069 ++rx_queue->removed_count; 1070 return; 1071 } 1072 } 1073 1074 ++rx_queue->scatter_n; 1075 if (rx_ev_cont) 1076 return; 1077 1078 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 1079 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 1080 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 1081 1082 if (likely(rx_ev_pkt_ok)) { 1083 /* If packet is marked as OK and packet type is TCP/IP or 1084 * UDP/IP, then we can rely on the hardware checksum. 1085 */ 1086 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 1087 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? 1088 EFX_RX_PKT_CSUMMED : 0; 1089 } else { 1090 flags = efx_handle_rx_not_ok(rx_queue, event); 1091 } 1092 1093 /* Detect multicast packets that didn't match the filter */ 1094 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 1095 if (rx_ev_mcast_pkt) { 1096 unsigned int rx_ev_mcast_hash_match = 1097 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 1098 1099 if (unlikely(!rx_ev_mcast_hash_match)) { 1100 ++channel->n_rx_mcast_mismatch; 1101 flags |= EFX_RX_PKT_DISCARD; 1102 } 1103 } 1104 1105 channel->irq_mod_score += 2; 1106 1107 /* Handle received packet */ 1108 efx_rx_packet(rx_queue, 1109 rx_queue->removed_count & rx_queue->ptr_mask, 1110 rx_queue->scatter_n, rx_ev_byte_cnt, flags); 1111 rx_queue->removed_count += rx_queue->scatter_n; 1112 rx_queue->scatter_n = 0; 1113 } 1114 1115 /* If this flush done event corresponds to a &struct efx_tx_queue, then 1116 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 1117 * of all transmit completions. 
1118 */ 1119 static void 1120 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1121 { 1122 struct efx_tx_queue *tx_queue; 1123 int qid; 1124 1125 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1126 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { 1127 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, 1128 qid % EFX_TXQ_TYPES); 1129 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) { 1130 efx_magic_event(tx_queue->channel, 1131 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 1132 } 1133 } 1134 } 1135 1136 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush 1137 * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add 1138 * the RX queue back to the mask of RX queues in need of flushing. 1139 */ 1140 static void 1141 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1142 { 1143 struct efx_channel *channel; 1144 struct efx_rx_queue *rx_queue; 1145 int qid; 1146 bool failed; 1147 1148 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1149 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1150 if (qid >= efx->n_channels) 1151 return; 1152 channel = efx_get_channel(efx, qid); 1153 if (!efx_channel_has_rx_queue(channel)) 1154 return; 1155 rx_queue = efx_channel_get_rx_queue(channel); 1156 1157 if (failed) { 1158 netif_info(efx, hw, efx->net_dev, 1159 "RXQ %d flush retry\n", qid); 1160 rx_queue->flush_pending = true; 1161 atomic_inc(&efx->rxq_flush_pending); 1162 } else { 1163 efx_magic_event(efx_rx_queue_channel(rx_queue), 1164 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 1165 } 1166 atomic_dec(&efx->rxq_flush_outstanding); 1167 if (efx_flush_wake(efx)) 1168 wake_up(&efx->flush_wq); 1169 } 1170 1171 static void 1172 efx_handle_drain_event(struct efx_channel *channel) 1173 { 1174 struct efx_nic *efx = channel->efx; 1175 1176 WARN_ON(atomic_read(&efx->drain_pending) == 0); 1177 atomic_dec(&efx->drain_pending); 1178 if (efx_flush_wake(efx)) 1179 wake_up(&efx->flush_wq); 1180 } 1181 1182 static void 1183 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1184 { 1185 struct efx_nic *efx = channel->efx; 1186 struct efx_rx_queue *rx_queue = 1187 efx_channel_has_rx_queue(channel) ? 1188 efx_channel_get_rx_queue(channel) : NULL; 1189 unsigned magic, code; 1190 1191 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1192 code = _EFX_CHANNEL_MAGIC_CODE(magic); 1193 1194 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 1195 channel->event_test_cpu = raw_smp_processor_id(); 1196 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 1197 /* The queue must be empty, so we won't receive any rx 1198 * events, so efx_process_channel() won't refill the 1199 * queue. 
Refill it here */ 1200 efx_fast_push_rx_descriptors(rx_queue); 1201 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 1202 rx_queue->enabled = false; 1203 efx_handle_drain_event(channel); 1204 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 1205 efx_handle_drain_event(channel); 1206 } else { 1207 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1208 "generated event "EFX_QWORD_FMT"\n", 1209 channel->channel, EFX_QWORD_VAL(*event)); 1210 } 1211 } 1212 1213 static void 1214 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1215 { 1216 struct efx_nic *efx = channel->efx; 1217 unsigned int ev_sub_code; 1218 unsigned int ev_sub_data; 1219 1220 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1221 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1222 1223 switch (ev_sub_code) { 1224 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1225 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1226 channel->channel, ev_sub_data); 1227 efx_handle_tx_flush_done(efx, event); 1228 efx_sriov_tx_flush_done(efx, event); 1229 break; 1230 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1231 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1232 channel->channel, ev_sub_data); 1233 efx_handle_rx_flush_done(efx, event); 1234 efx_sriov_rx_flush_done(efx, event); 1235 break; 1236 case FSE_AZ_EVQ_INIT_DONE_EV: 1237 netif_dbg(efx, hw, efx->net_dev, 1238 "channel %d EVQ %d initialised\n", 1239 channel->channel, ev_sub_data); 1240 break; 1241 case FSE_AZ_SRM_UPD_DONE_EV: 1242 netif_vdbg(efx, hw, efx->net_dev, 1243 "channel %d SRAM update done\n", channel->channel); 1244 break; 1245 case FSE_AZ_WAKE_UP_EV: 1246 netif_vdbg(efx, hw, efx->net_dev, 1247 "channel %d RXQ %d wakeup event\n", 1248 channel->channel, ev_sub_data); 1249 break; 1250 case FSE_AZ_TIMER_EV: 1251 netif_vdbg(efx, hw, efx->net_dev, 1252 "channel %d RX queue %d timer expired\n", 1253 channel->channel, ev_sub_data); 1254 break; 1255 case FSE_AA_RX_RECOVER_EV: 1256 netif_err(efx, rx_err, efx->net_dev, 1257 "channel %d seen DRIVER RX_RESET event. " 1258 "Resetting.\n", channel->channel); 1259 atomic_inc(&efx->rx_reset); 1260 efx_schedule_reset(efx, 1261 EFX_WORKAROUND_6555(efx) ? 1262 RESET_TYPE_RX_RECOVERY : 1263 RESET_TYPE_DISABLE); 1264 break; 1265 case FSE_BZ_RX_DSC_ERROR_EV: 1266 if (ev_sub_data < EFX_VI_BASE) { 1267 netif_err(efx, rx_err, efx->net_dev, 1268 "RX DMA Q %d reports descriptor fetch error." 1269 " RX Q %d is disabled.\n", ev_sub_data, 1270 ev_sub_data); 1271 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1272 } else 1273 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1274 break; 1275 case FSE_BZ_TX_DSC_ERROR_EV: 1276 if (ev_sub_data < EFX_VI_BASE) { 1277 netif_err(efx, tx_err, efx->net_dev, 1278 "TX DMA Q %d reports descriptor fetch error." 
1279 " TX Q %d is disabled.\n", ev_sub_data, 1280 ev_sub_data); 1281 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1282 } else 1283 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1284 break; 1285 default: 1286 netif_vdbg(efx, hw, efx->net_dev, 1287 "channel %d unknown driver event code %d " 1288 "data %04x\n", channel->channel, ev_sub_code, 1289 ev_sub_data); 1290 break; 1291 } 1292 } 1293 1294 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1295 { 1296 struct efx_nic *efx = channel->efx; 1297 unsigned int read_ptr; 1298 efx_qword_t event, *p_event; 1299 int ev_code; 1300 int tx_packets = 0; 1301 int spent = 0; 1302 1303 read_ptr = channel->eventq_read_ptr; 1304 1305 for (;;) { 1306 p_event = efx_event(channel, read_ptr); 1307 event = *p_event; 1308 1309 if (!efx_event_present(&event)) 1310 /* End of events */ 1311 break; 1312 1313 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1314 "channel %d event is "EFX_QWORD_FMT"\n", 1315 channel->channel, EFX_QWORD_VAL(event)); 1316 1317 /* Clear this event by marking it all ones */ 1318 EFX_SET_QWORD(*p_event); 1319 1320 ++read_ptr; 1321 1322 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1323 1324 switch (ev_code) { 1325 case FSE_AZ_EV_CODE_RX_EV: 1326 efx_handle_rx_event(channel, &event); 1327 if (++spent == budget) 1328 goto out; 1329 break; 1330 case FSE_AZ_EV_CODE_TX_EV: 1331 tx_packets += efx_handle_tx_event(channel, &event); 1332 if (tx_packets > efx->txq_entries) { 1333 spent = budget; 1334 goto out; 1335 } 1336 break; 1337 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1338 efx_handle_generated_event(channel, &event); 1339 break; 1340 case FSE_AZ_EV_CODE_DRIVER_EV: 1341 efx_handle_driver_event(channel, &event); 1342 break; 1343 case FSE_CZ_EV_CODE_USER_EV: 1344 efx_sriov_event(channel, &event); 1345 break; 1346 case FSE_CZ_EV_CODE_MCDI_EV: 1347 efx_mcdi_process_event(channel, &event); 1348 break; 1349 case FSE_AZ_EV_CODE_GLOBAL_EV: 1350 if (efx->type->handle_global_event && 1351 efx->type->handle_global_event(channel, &event)) 1352 break; 1353 /* else fall through */ 1354 default: 1355 netif_err(channel->efx, hw, channel->efx->net_dev, 1356 "channel %d unknown event type %d (data " 1357 EFX_QWORD_FMT ")\n", channel->channel, 1358 ev_code, EFX_QWORD_VAL(event)); 1359 } 1360 } 1361 1362 out: 1363 channel->eventq_read_ptr = read_ptr; 1364 return spent; 1365 } 1366 1367 /* Check whether an event is present in the eventq at the current 1368 * read pointer. Only useful for self-test. 
1369 */ 1370 bool efx_nic_event_present(struct efx_channel *channel) 1371 { 1372 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1373 } 1374 1375 /* Allocate buffer table entries for event queue */ 1376 int efx_nic_probe_eventq(struct efx_channel *channel) 1377 { 1378 struct efx_nic *efx = channel->efx; 1379 unsigned entries; 1380 1381 entries = channel->eventq_mask + 1; 1382 return efx_alloc_special_buffer(efx, &channel->eventq, 1383 entries * sizeof(efx_qword_t)); 1384 } 1385 1386 void efx_nic_init_eventq(struct efx_channel *channel) 1387 { 1388 efx_oword_t reg; 1389 struct efx_nic *efx = channel->efx; 1390 1391 netif_dbg(efx, hw, efx->net_dev, 1392 "channel %d event queue in special buffers %d-%d\n", 1393 channel->channel, channel->eventq.index, 1394 channel->eventq.index + channel->eventq.entries - 1); 1395 1396 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1397 EFX_POPULATE_OWORD_3(reg, 1398 FRF_CZ_TIMER_Q_EN, 1, 1399 FRF_CZ_HOST_NOTIFY_MODE, 0, 1400 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1401 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1402 } 1403 1404 /* Pin event queue buffer */ 1405 efx_init_special_buffer(efx, &channel->eventq); 1406 1407 /* Fill event queue with all ones (i.e. empty events) */ 1408 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1409 1410 /* Push event queue to card */ 1411 EFX_POPULATE_OWORD_3(reg, 1412 FRF_AZ_EVQ_EN, 1, 1413 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1414 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1415 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1416 channel->channel); 1417 1418 efx->type->push_irq_moderation(channel); 1419 } 1420 1421 void efx_nic_fini_eventq(struct efx_channel *channel) 1422 { 1423 efx_oword_t reg; 1424 struct efx_nic *efx = channel->efx; 1425 1426 /* Remove event queue from card */ 1427 EFX_ZERO_OWORD(reg); 1428 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1429 channel->channel); 1430 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1431 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1432 1433 /* Unpin event queue */ 1434 efx_fini_special_buffer(efx, &channel->eventq); 1435 } 1436 1437 /* Free buffers backing event queue */ 1438 void efx_nic_remove_eventq(struct efx_channel *channel) 1439 { 1440 efx_free_special_buffer(channel->efx, &channel->eventq); 1441 } 1442 1443 1444 void efx_nic_event_test_start(struct efx_channel *channel) 1445 { 1446 channel->event_test_cpu = -1; 1447 smp_wmb(); 1448 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 1449 } 1450 1451 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) 1452 { 1453 efx_magic_event(efx_rx_queue_channel(rx_queue), 1454 EFX_CHANNEL_MAGIC_FILL(rx_queue)); 1455 } 1456 1457 /************************************************************************** 1458 * 1459 * Hardware interrupts 1460 * The hardware interrupt handler does very little work; all the event 1461 * queue processing is carried out by per-channel tasklets. 
1462 * 1463 **************************************************************************/ 1464 1465 /* Enable/disable/generate interrupts */ 1466 static inline void efx_nic_interrupts(struct efx_nic *efx, 1467 bool enabled, bool force) 1468 { 1469 efx_oword_t int_en_reg_ker; 1470 1471 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1472 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1473 FRF_AZ_KER_INT_KER, force, 1474 FRF_AZ_DRV_INT_EN_KER, enabled); 1475 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1476 } 1477 1478 void efx_nic_enable_interrupts(struct efx_nic *efx) 1479 { 1480 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1481 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1482 1483 efx_nic_interrupts(efx, true, false); 1484 } 1485 1486 void efx_nic_disable_interrupts(struct efx_nic *efx) 1487 { 1488 /* Disable interrupts */ 1489 efx_nic_interrupts(efx, false, false); 1490 } 1491 1492 /* Generate a test interrupt 1493 * Interrupt must already have been enabled, otherwise nasty things 1494 * may happen. 1495 */ 1496 void efx_nic_irq_test_start(struct efx_nic *efx) 1497 { 1498 efx->last_irq_cpu = -1; 1499 smp_wmb(); 1500 efx_nic_interrupts(efx, true, true); 1501 } 1502 1503 /* Process a fatal interrupt 1504 * Disable bus mastering ASAP and schedule a reset 1505 */ 1506 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1507 { 1508 struct falcon_nic_data *nic_data = efx->nic_data; 1509 efx_oword_t *int_ker = efx->irq_status.addr; 1510 efx_oword_t fatal_intr; 1511 int error, mem_perr; 1512 1513 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1514 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1515 1516 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1517 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1518 EFX_OWORD_VAL(fatal_intr), 1519 error ? "disabling bus mastering" : "no recognised error"); 1520 1521 /* If this is a memory parity error dump which blocks are offending */ 1522 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1523 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1524 if (mem_perr) { 1525 efx_oword_t reg; 1526 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1527 netif_err(efx, hw, efx->net_dev, 1528 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1529 EFX_OWORD_VAL(reg)); 1530 } 1531 1532 /* Disable both devices */ 1533 pci_clear_master(efx->pci_dev); 1534 if (efx_nic_is_dual_func(efx)) 1535 pci_clear_master(nic_data->pci_dev2); 1536 efx_nic_disable_interrupts(efx); 1537 1538 /* Count errors and reset or disable the NIC accordingly */ 1539 if (efx->int_error_count == 0 || 1540 time_after(jiffies, efx->int_error_expire)) { 1541 efx->int_error_count = 0; 1542 efx->int_error_expire = 1543 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1544 } 1545 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1546 netif_err(efx, hw, efx->net_dev, 1547 "SYSTEM ERROR - reset scheduled\n"); 1548 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1549 } else { 1550 netif_err(efx, hw, efx->net_dev, 1551 "SYSTEM ERROR - max number of errors seen." 1552 "NIC will be disabled\n"); 1553 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1554 } 1555 1556 return IRQ_HANDLED; 1557 } 1558 1559 /* Handle a legacy interrupt 1560 * Acknowledges the interrupt and schedule event queue processing. 
1561 */ 1562 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1563 { 1564 struct efx_nic *efx = dev_id; 1565 efx_oword_t *int_ker = efx->irq_status.addr; 1566 irqreturn_t result = IRQ_NONE; 1567 struct efx_channel *channel; 1568 efx_dword_t reg; 1569 u32 queues; 1570 int syserr; 1571 1572 /* Could this be ours? If interrupts are disabled then the 1573 * channel state may not be valid. 1574 */ 1575 if (!efx->legacy_irq_enabled) 1576 return result; 1577 1578 /* Read the ISR which also ACKs the interrupts */ 1579 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1580 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1581 1582 /* Legacy interrupts are disabled too late by the EEH kernel 1583 * code. Disable them earlier. 1584 * If an EEH error occurred, the read will have returned all ones. 1585 */ 1586 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && 1587 !efx->eeh_disabled_legacy_irq) { 1588 disable_irq_nosync(efx->legacy_irq); 1589 efx->eeh_disabled_legacy_irq = true; 1590 } 1591 1592 /* Handle non-event-queue sources */ 1593 if (queues & (1U << efx->irq_level)) { 1594 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1595 if (unlikely(syserr)) 1596 return efx_nic_fatal_interrupt(efx); 1597 efx->last_irq_cpu = raw_smp_processor_id(); 1598 } 1599 1600 if (queues != 0) { 1601 if (EFX_WORKAROUND_15783(efx)) 1602 efx->irq_zero_count = 0; 1603 1604 /* Schedule processing of any interrupting queues */ 1605 efx_for_each_channel(channel, efx) { 1606 if (queues & 1) 1607 efx_schedule_channel_irq(channel); 1608 queues >>= 1; 1609 } 1610 result = IRQ_HANDLED; 1611 1612 } else if (EFX_WORKAROUND_15783(efx)) { 1613 efx_qword_t *event; 1614 1615 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1616 * because this might be a shared interrupt. */ 1617 if (efx->irq_zero_count++ == 0) 1618 result = IRQ_HANDLED; 1619 1620 /* Ensure we schedule or rearm all event queues */ 1621 efx_for_each_channel(channel, efx) { 1622 event = efx_event(channel, channel->eventq_read_ptr); 1623 if (efx_event_present(event)) 1624 efx_schedule_channel_irq(channel); 1625 else 1626 efx_nic_eventq_read_ack(channel); 1627 } 1628 } 1629 1630 if (result == IRQ_HANDLED) 1631 netif_vdbg(efx, intr, efx->net_dev, 1632 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1633 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1634 1635 return result; 1636 } 1637 1638 /* Handle an MSI interrupt 1639 * 1640 * Handle an MSI hardware interrupt. This routine schedules event 1641 * queue processing. No interrupt acknowledgement cycle is necessary. 1642 * Also, we never need to check that the interrupt is for us, since 1643 * MSI interrupts cannot be shared. 1644 */ 1645 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1646 { 1647 struct efx_channel *channel = *(struct efx_channel **)dev_id; 1648 struct efx_nic *efx = channel->efx; 1649 efx_oword_t *int_ker = efx->irq_status.addr; 1650 int syserr; 1651 1652 netif_vdbg(efx, intr, efx->net_dev, 1653 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1654 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1655 1656 /* Handle non-event-queue sources */ 1657 if (channel->channel == efx->irq_level) { 1658 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1659 if (unlikely(syserr)) 1660 return efx_nic_fatal_interrupt(efx); 1661 efx->last_irq_cpu = raw_smp_processor_id(); 1662 } 1663 1664 /* Schedule processing of the channel */ 1665 efx_schedule_channel_irq(channel); 1666 1667 return IRQ_HANDLED; 1668 } 1669 1670 1671 /* Setup RSS indirection table. 
1672 * This maps from the hash value of the packet to RXQ 1673 */ 1674 void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1675 { 1676 size_t i = 0; 1677 efx_dword_t dword; 1678 1679 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1680 return; 1681 1682 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1683 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1684 1685 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1686 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1687 efx->rx_indir_table[i]); 1688 efx_writed(efx, &dword, 1689 FR_BZ_RX_INDIRECTION_TBL + 1690 FR_BZ_RX_INDIRECTION_TBL_STEP * i); 1691 } 1692 } 1693 1694 /* Hook interrupt handler(s) 1695 * Try MSI and then legacy interrupts. 1696 */ 1697 int efx_nic_init_interrupt(struct efx_nic *efx) 1698 { 1699 struct efx_channel *channel; 1700 int rc; 1701 1702 if (!EFX_INT_MODE_USE_MSI(efx)) { 1703 irq_handler_t handler; 1704 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1705 handler = efx_legacy_interrupt; 1706 else 1707 handler = falcon_legacy_interrupt_a1; 1708 1709 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1710 efx->name, efx); 1711 if (rc) { 1712 netif_err(efx, drv, efx->net_dev, 1713 "failed to hook legacy IRQ %d\n", 1714 efx->pci_dev->irq); 1715 goto fail1; 1716 } 1717 return 0; 1718 } 1719 1720 /* Hook MSI or MSI-X interrupt */ 1721 efx_for_each_channel(channel, efx) { 1722 rc = request_irq(channel->irq, efx_msi_interrupt, 1723 IRQF_PROBE_SHARED, /* Not shared */ 1724 efx->channel_name[channel->channel], 1725 &efx->channel[channel->channel]); 1726 if (rc) { 1727 netif_err(efx, drv, efx->net_dev, 1728 "failed to hook IRQ %d\n", channel->irq); 1729 goto fail2; 1730 } 1731 } 1732 1733 return 0; 1734 1735 fail2: 1736 efx_for_each_channel(channel, efx) 1737 free_irq(channel->irq, &efx->channel[channel->channel]); 1738 fail1: 1739 return rc; 1740 } 1741 1742 void efx_nic_fini_interrupt(struct efx_nic *efx) 1743 { 1744 struct efx_channel *channel; 1745 efx_oword_t reg; 1746 1747 /* Disable MSI/MSI-X interrupts */ 1748 efx_for_each_channel(channel, efx) { 1749 if (channel->irq) 1750 free_irq(channel->irq, &efx->channel[channel->channel]); 1751 } 1752 1753 /* ACK legacy interrupt */ 1754 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1755 efx_reado(efx, ®, FR_BZ_INT_ISR0); 1756 else 1757 falcon_irq_ack_a1(efx); 1758 1759 /* Disable legacy interrupt */ 1760 if (efx->legacy_irq) 1761 free_irq(efx->legacy_irq, efx); 1762 } 1763 1764 /* Looks at available SRAM resources and works out how many queues we 1765 * can support, and where things like descriptor caches should live. 1766 * 1767 * SRAM is split up as follows: 1768 * 0 buftbl entries for channels 1769 * efx->vf_buftbl_base buftbl entries for SR-IOV 1770 * efx->rx_dc_base RX descriptor caches 1771 * efx->tx_dc_base TX descriptor caches 1772 */ 1773 void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) 1774 { 1775 unsigned vi_count, buftbl_min; 1776 1777 /* Account for the buffer table entries backing the datapath channels 1778 * and the descriptor caches for those channels. 
1779 */ 1780 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + 1781 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + 1782 efx->n_channels * EFX_MAX_EVQ_SIZE) 1783 * sizeof(efx_qword_t) / EFX_BUF_SIZE); 1784 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); 1785 1786 #ifdef CONFIG_SFC_SRIOV 1787 if (efx_sriov_wanted(efx)) { 1788 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; 1789 1790 efx->vf_buftbl_base = buftbl_min; 1791 1792 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; 1793 vi_count = max(vi_count, EFX_VI_BASE); 1794 buftbl_free = (sram_lim_qw - buftbl_min - 1795 vi_count * vi_dc_entries); 1796 1797 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * 1798 efx_vf_size(efx)); 1799 vf_limit = min(buftbl_free / entries_per_vf, 1800 (1024U - EFX_VI_BASE) >> efx->vi_scale); 1801 1802 if (efx->vf_count > vf_limit) { 1803 netif_err(efx, probe, efx->net_dev, 1804 "Reducing VF count from from %d to %d\n", 1805 efx->vf_count, vf_limit); 1806 efx->vf_count = vf_limit; 1807 } 1808 vi_count += efx->vf_count * efx_vf_size(efx); 1809 } 1810 #endif 1811 1812 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; 1813 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; 1814 } 1815 1816 u32 efx_nic_fpga_ver(struct efx_nic *efx) 1817 { 1818 efx_oword_t altera_build; 1819 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1820 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1821 } 1822 1823 void efx_nic_init_common(struct efx_nic *efx) 1824 { 1825 efx_oword_t temp; 1826 1827 /* Set positions of descriptor caches in SRAM. */ 1828 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 1829 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1830 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 1831 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1832 1833 /* Set TX descriptor cache size. */ 1834 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1835 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1836 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1837 1838 /* Set RX descriptor cache size. Set low watermark to size-8, as 1839 * this allows most efficient prefetching. 1840 */ 1841 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1842 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1843 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1844 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1845 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1846 1847 /* Program INT_KER address */ 1848 EFX_POPULATE_OWORD_2(temp, 1849 FRF_AZ_NORM_INT_VEC_DIS_KER, 1850 EFX_INT_MODE_USE_MSI(efx), 1851 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1852 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1853 1854 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1855 /* Use an interrupt level unused by event queues */ 1856 efx->irq_level = 0x1f; 1857 else 1858 /* Use a valid MSI-X vector */ 1859 efx->irq_level = 0; 1860 1861 /* Enable all the genuinely fatal interrupts. (They are still 1862 * masked by the overall interrupt mask, controlled by 1863 * falcon_interrupts()). 
1864 * 1865 * Note: All other fatal interrupts are enabled 1866 */ 1867 EFX_POPULATE_OWORD_3(temp, 1868 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1869 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1870 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1871 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1872 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1873 EFX_INVERT_OWORD(temp); 1874 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1875 1876 efx_nic_push_rx_indir_table(efx); 1877 1878 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1879 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 1880 */ 1881 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1882 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1883 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1884 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1885 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1886 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1887 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1888 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1889 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1890 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1891 /* Disable hardware watchdog which can misfire */ 1892 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1893 /* Squash TX of packets of 16 bytes or less */ 1894 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1895 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1896 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1897 1898 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1899 EFX_POPULATE_OWORD_4(temp, 1900 /* Default values */ 1901 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1902 FRF_BZ_TX_PACE_SB_AF, 0xb, 1903 FRF_BZ_TX_PACE_FB_BASE, 0, 1904 /* Allow large pace values in the 1905 * fast bin. 
                                      */
                                     FRF_BZ_TX_PACE_BIN_TH,
                                     FFE_BZ_TX_PACE_RESERVED);
                efx_writeo(efx, &temp, FR_BZ_TX_PACE);
        }
}

/* Register dump */

#define REGISTER_REVISION_A     1
#define REGISTER_REVISION_B     2
#define REGISTER_REVISION_C     3
#define REGISTER_REVISION_Z     3       /* latest revision */

struct efx_nic_reg {
        u32 offset:24;
        u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {                              \
        FR_ ## min_rev ## max_rev ## _ ## name,                         \
        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AZ(ADR_REGION),
        REGISTER_AZ(INT_EN_KER),
        REGISTER_BZ(INT_EN_CHAR),
        REGISTER_AZ(INT_ADR_KER),
        REGISTER_BZ(INT_ADR_CHAR),
        /* INT_ACK_KER is WO */
        /* INT_ISR0 is RC */
        REGISTER_AZ(HW_INIT),
        REGISTER_CZ(USR_EV_CFG),
        REGISTER_AB(EE_SPI_HCMD),
        REGISTER_AB(EE_SPI_HADR),
        REGISTER_AB(EE_SPI_HDATA),
        REGISTER_AB(EE_BASE_PAGE),
        REGISTER_AB(EE_VPD_CFG0),
        /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
        /* PMBX_DBG_IADDR and PMBX_DBG_IDATA are indirect */
        /* PCIE_CORE_INDIRECT is indirect */
        REGISTER_AB(NIC_STAT),
        REGISTER_AB(GPIO_CTL),
        REGISTER_AB(GLB_CTL),
        /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
        REGISTER_BZ(DP_CTRL),
        REGISTER_AZ(MEM_STAT),
        REGISTER_AZ(CS_DEBUG),
        REGISTER_AZ(ALTERA_BUILD),
        REGISTER_AZ(CSR_SPARE),
        REGISTER_AB(PCIE_SD_CTL0123),
        REGISTER_AB(PCIE_SD_CTL45),
        REGISTER_AB(PCIE_PCS_CTL_STAT),
        /* DEBUG_DATA_OUT is not used */
        /* DRV_EV is WO */
        REGISTER_AZ(EVQ_CTL),
        REGISTER_AZ(EVQ_CNT1),
        REGISTER_AZ(EVQ_CNT2),
        REGISTER_AZ(BUF_TBL_CFG),
        REGISTER_AZ(SRM_RX_DC_CFG),
        REGISTER_AZ(SRM_TX_DC_CFG),
        REGISTER_AZ(SRM_CFG),
        /* BUF_TBL_UPD is WO */
        REGISTER_AZ(SRM_UPD_EVQ),
        REGISTER_AZ(SRAM_PARITY),
        REGISTER_AZ(RX_CFG),
        REGISTER_BZ(RX_FILTER_CTL),
        /* RX_FLUSH_DESCQ is WO */
        REGISTER_AZ(RX_DC_CFG),
        REGISTER_AZ(RX_DC_PF_WM),
        REGISTER_BZ(RX_RSS_TKEY),
        /* RX_NODESC_DROP is RC */
        REGISTER_AA(RX_SELF_RST),
        /* RX_DEBUG, RX_PUSH_DROP are not used */
        REGISTER_CZ(RX_RSS_IPV6_REG1),
        REGISTER_CZ(RX_RSS_IPV6_REG2),
        REGISTER_CZ(RX_RSS_IPV6_REG3),
        /* TX_FLUSH_DESCQ is WO */
        REGISTER_AZ(TX_DC_CFG),
        REGISTER_AA(TX_CHKSM_CFG),
        REGISTER_AZ(TX_CFG),
        /* TX_PUSH_DROP is not used */
        REGISTER_AZ(TX_RESERVED),
        REGISTER_BZ(TX_PACE),
        /* TX_PACE_DROP_QID is RC */
        REGISTER_BB(TX_VLAN),
        REGISTER_BZ(TX_IPFIL_PORTEN),
        REGISTER_AB(MD_TXD),
        REGISTER_AB(MD_RXD),
        REGISTER_AB(MD_CS),
        REGISTER_AB(MD_PHY_ADR),
        REGISTER_AB(MD_ID),
        /* MD_STAT is RC */
        REGISTER_AB(MAC_STAT_DMA),
        REGISTER_AB(MAC_CTRL),
        REGISTER_BB(GEN_MODE),
        REGISTER_AB(MAC_MC_HASH_REG0),
        REGISTER_AB(MAC_MC_HASH_REG1),
        REGISTER_AB(GM_CFG1),
        REGISTER_AB(GM_CFG2),
        /* GM_IPG and GM_HD are not used */
        REGISTER_AB(GM_MAX_FLEN),
        /* GM_TEST is not used */
        REGISTER_AB(GM_ADR1),
        REGISTER_AB(GM_ADR2),
        REGISTER_AB(GMF_CFG0),
        REGISTER_AB(GMF_CFG1),
        REGISTER_AB(GMF_CFG2),
        REGISTER_AB(GMF_CFG3),
        REGISTER_AB(GMF_CFG4),
        REGISTER_AB(GMF_CFG5),
        REGISTER_BB(TX_SRC_MAC_CTL),
        REGISTER_AB(XM_ADR_LO),
        REGISTER_AB(XM_ADR_HI),
        REGISTER_AB(XM_GLB_CFG),
        REGISTER_AB(XM_TX_CFG),
        REGISTER_AB(XM_RX_CFG),
        REGISTER_AB(XM_MGT_INT_MASK),
        REGISTER_AB(XM_FC),
        REGISTER_AB(XM_PAUSE_TIME),
        REGISTER_AB(XM_TX_PARAM),
        REGISTER_AB(XM_RX_PARAM),
        /* XM_MGT_INT_MSK (note no 'A') is RC */
        REGISTER_AB(XX_PWR_RST),
        REGISTER_AB(XX_SD_CTL),
        REGISTER_AB(XX_TXDRV_CTL),
        /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
        /* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
        u32 offset:24;
        u32 min_revision:2, max_revision:2;
        u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
        offset,                                                         \
        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
        step, rows                                                      \
}
#define REGISTER_TABLE(name, min_rev, max_rev)                          \
        REGISTER_TABLE_DIMENSIONS(                                      \
                name, FR_ ## min_rev ## max_rev ## _ ## name,           \
                min_rev, max_rev,                                       \
                FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
                FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)                                      \
        REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_BB_ ## name ## _ROWS),             \
        REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* DRIVER is not used */
        /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
        REGISTER_TABLE_BB(TX_IPFIL_TBL),
        REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
        REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
        REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
        REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
        REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
        /* We can't reasonably read all of the buffer table (up to 8MB!).
         * However this driver will only use a few entries.  Reading
         * 1K entries allows for some expansion of queue count and
         * size before we need to change the version.
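         *
         * With the 8-byte step declared below and the min(step, 16) clamp
         * applied in efx_nic_get_regs_len(), each of these 1024-row entries
         * contributes 1024 * 8 = 8 KiB to the register dump.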
         */
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
                                  A, A, 8, 1024),
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
                                  B, Z, 8, 1024),
        REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
        REGISTER_TABLE_BB_CZ(TIMER_TBL),
        REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
        REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
        /* TX_FILTER_TBL0 is huge and not used by this driver */
        REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
        REGISTER_TABLE_CZ(MC_TREG_SMEM),
        /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
        REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
        const struct efx_nic_reg *reg;
        const struct efx_nic_reg_table *table;
        size_t len = 0;

        for (reg = efx_nic_regs;
             reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
             reg++)
                if (efx->type->revision >= reg->min_revision &&
                    efx->type->revision <= reg->max_revision)
                        len += sizeof(efx_oword_t);

        for (table = efx_nic_reg_tables;
             table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
             table++)
                if (efx->type->revision >= table->min_revision &&
                    efx->type->revision <= table->max_revision)
                        len += table->rows * min_t(size_t, table->step, 16);

        return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
        const struct efx_nic_reg *reg;
        const struct efx_nic_reg_table *table;

        for (reg = efx_nic_regs;
             reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
             reg++) {
                if (efx->type->revision >= reg->min_revision &&
                    efx->type->revision <= reg->max_revision) {
                        efx_reado(efx, (efx_oword_t *)buf, reg->offset);
                        buf += sizeof(efx_oword_t);
                }
        }

        for (table = efx_nic_reg_tables;
             table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
             table++) {
                size_t size, i;

                if (!(efx->type->revision >= table->min_revision &&
                      efx->type->revision <= table->max_revision))
                        continue;

                size = min_t(size_t, table->step, 16);

                for (i = 0; i < table->rows; i++) {
                        switch (table->step) {
                        case 4: /* 32-bit SRAM */
                                efx_readd(efx, buf, table->offset + 4 * i);
                                break;
                        case 8: /* 64-bit SRAM */
                                efx_sram_readq(efx,
                                               efx->membase + table->offset,
                                               buf, i);
                                break;
                        case 16: /* 128-bit-readable register */
                                efx_reado_table(efx, buf, table->offset, i);
                                break;
                        case 32: /* 128-bit register, interleaved */
                                efx_reado_table(efx, buf, table->offset, 2 * i);
                                break;
                        default:
                                WARN_ON(1);
                                return;
                        }
                        buf += size;
                }
        }
}
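
/* The two functions above are intended to be driven from ethtool's
 * get_regs_len()/get_regs() callbacks.  A minimal sketch of such a caller
 * follows (hypothetical names; the real glue for this driver lives in
 * ethtool.c, which is not part of this file):
 *
 *	static int example_get_regs_len(struct net_device *net_dev)
 *	{
 *		return efx_nic_get_regs_len(netdev_priv(net_dev));
 *	}
 *
 *	static void example_get_regs(struct net_device *net_dev,
 *				     struct ethtool_regs *regs, void *buf)
 *	{
 *		struct efx_nic *efx = netdev_priv(net_dev);
 *
 *		regs->version = efx->type->revision;
 *		efx_nic_get_regs(efx, buf);
 *	}
 *
 * The buffer handed to efx_nic_get_regs() must be at least
 * efx_nic_get_regs_len() bytes long.
 */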