/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
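/* For illustration (values derived from the macros above): a test event
 * on channel 3 is encoded as (0x000101 << 8) | 3 == 0x00010103, and
 * _EFX_CHANNEL_MAGIC_CODE(0x00010103) recovers the code 0x000101, so a
 * handler can dispatch on event type before matching the queue number.
 */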
static void efx_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
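/* Note: the register test above toggles each maskable bit in both
 * directions and reads it back through the mask, so a mismatch in
 * either direction points at a stuck register bit rather than at a
 * transient readback problem.
 */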
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
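/* Sizing illustration (hypothetical queue size): a 1024-entry descriptor
 * ring of 8-byte (efx_qword_t) descriptors needs 8KB of DMA memory, which
 * efx_alloc_special_buffer() rounds to two 4KB (EFX_BUF_SIZE) buffer
 * table entries; FRF_AZ_BUF_ADR_FBUF stores each 4KB page number, hence
 * the dma_addr >> 12 in efx_init_special_buffer() above.
 */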
/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_ATOMIC);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
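/* A note on the check above (based on how empty_read_count is maintained
 * elsewhere in this driver): the completion path records the read count,
 * tagged with EFX_EMPTY_COUNT_VALID, when it observes the queue go
 * empty.  With that flag masked off, the XOR is zero only if nothing has
 * been written since the queue emptied, i.e. the descriptor about to be
 * pushed is the first new one and a PIO push is worthwhile.
 */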
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
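/* Note on the checksum configuration in efx_nic_init_tx() above: on
 * Falcon B0 and later the offload enables live in the per-queue
 * descriptor-pointer table entry, whereas Falcon A1 keeps one global
 * 128-bit disable bitmap (FR_AA_TX_CHKSM_CFG) indexed by queue number,
 * which is why the pre-B0 path does a read-modify-write of that register.
 */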
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}
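/* Note: efx_nic_notify_rx_desc() above builds the hardware descriptors
 * lazily, for every buffer added since the last notify, so a single
 * doorbell (the WPTR update) covers the whole batch.
 */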
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
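/* Flush accounting used below: drain_pending counts every TX and RX
 * queue still waiting for a drain event; rxq_flush_pending counts RX
 * queues whose flush has not yet been issued; rxq_flush_outstanding
 * counts flushes issued to the hardware but not yet completed, capped at
 * EFX_RX_FLUSH_COUNT.  Waiters sleep on efx->flush_wq.
 */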
/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}
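/* Note: efx_check_tx_flush_complete() reads the TX descriptor pointer
 * table directly, so it can recover queues whose flush completed but
 * whose completion event was lost, by injecting the drain event itself.
 */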
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
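/* Illustration of the batching arithmetic in efx_handle_tx_event()
 * (hypothetical values): with ptr_mask == 1023, read_count == 1020 and
 * an event carrying desc_ptr == 4, (4 - 1020) & 1023 == 8, i.e. eight
 * descriptors' worth of completions reported by a single event across
 * the ring wrap.
 */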
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test.
	 */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.  The "is multicast" and "matches
 * multicast filter" flags can also be used to discard non-matching
 * multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
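/* Scatter flow summary (as implemented above): an SOP event starts a
 * packet and each JUMBO_CONT event adds one more fragment to scatter_n;
 * only the final (non-continuation) event carries the byte count and
 * flags, at which point the whole run of fragments is handed to
 * efx_rx_packet() and the ring counters advance by scatter_n.
 */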
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
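/* Note on the qid decoding in efx_handle_tx_flush_done() above: hardware
 * TX queue numbers are laid out as channel * EFX_TXQ_TYPES + type, so
 * qid / EFX_TXQ_TYPES recovers the channel and qid % EFX_TXQ_TYPES the
 * queue type within that channel.
 */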
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here
		 */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
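/* Note: clearing a consumed event by writing all ones (EFX_SET_QWORD
 * above) is what makes the empty-slot test in efx_event_present() work;
 * a cleared slot is indistinguishable from one the hardware has never
 * written, and efx_nic_init_eventq() fills the queue with 0xff for the
 * same reason.
 */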
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
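/* Rate-limiting behaviour of the error counter above: int_error_count
 * resets once EFX_INT_ERROR_EXPIRE (3600) seconds have passed since the
 * first error of a burst, so up to EFX_MAX_INT_ERRORS - 1 fatal errors
 * per window are handled with resets; on the fifth within the window the
 * NIC is disabled instead.
 */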
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
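/* Sizing illustration for efx_nic_dimension_resources() below
 * (hypothetical values, assuming EFX_MAX_DMAQ_SIZE is 4096 entries): a
 * 4096-entry queue of 8-byte descriptors needs 32KB of buffer table
 * backing, i.e. eight 4KB entries, so buftbl_min charges eight entries
 * for every RX and TX DMA queue before the VF region is placed above it.
 */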
/* Hook interrupt handler(s)
 * Hook either the legacy IRQ or one MSI/MSI-X IRQ per channel,
 * according to the interrupt mode selected earlier.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		/* dev_id is the address of the channel pointer in
		 * efx->channel[]; efx_msi_interrupt() dereferences it
		 * to find the channel.
		 */
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 *     0                          buftbl entries for channels
 *     efx->vf_buftbl_base        buftbl entries for SR-IOV
 *     efx->rx_dc_base            RX descriptor caches
 *     efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
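
/* Worked example, assuming the usual constants (EFX_MAX_DMAQ_SIZE = 4096,
 * EFX_MAX_EVQ_SIZE = 16384, EFX_TXQ_TYPES = 4, EFX_BUF_SIZE = 4096 and
 * sizeof(efx_qword_t) = 8): each buffer table entry maps one 4KB page,
 * so a maximally-sized RX or TX ring needs 4096 * 8 / 4096 = 8 entries
 * and an event queue needs 16384 * 8 / 4096 = 32.  With 4 channels
 * (4 RX queues, 4 * 4 TX queues, 4 event queues):
 *
 *	buftbl_min = 4*8 (RX) + 16*8 (TX) + 4*32 (EVQ) = 288 entries
 *
 * The descriptor caches are then placed at the top of SRAM, growing
 * downwards from sram_lim_qw.
 */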
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
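
/* For example, REGISTER_AB(NIC_STAT) expands via REGISTER() to
 * { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B },
 * i.e. the register's offset plus the revision range (Falcon A to
 * Falcon B) over which it may safely be dumped.
 */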
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
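
/* For example, REGISTER_TABLE_BB_CZ(TIMER_TBL) expands to two array
 * entries sharing the FR_BZ_TIMER_TBL offset and step, one per revision
 * range, because the row count (FR_BB_TIMER_TBL_ROWS vs
 * FR_CZ_TIMER_TBL_ROWS) differs between Falcon B0 and Siena.
 */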
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
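
/* Minimal usage sketch, not part of this file: an ethtool get_regs
 * implementation pairs the two functions above, sizing the dump buffer
 * with efx_nic_get_regs_len() and filling it with efx_nic_get_regs().
 * The driver's ethtool code does roughly the following (names here are
 * illustrative):
 */
#if 0
static int sketch_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx_nic_get_regs_len(efx);
}

static void sketch_get_regs(struct net_device *net_dev,
			    struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Report the NIC revision so userspace can decode the dump */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}
#endif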