/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}
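/* The event ring size is a power of two and eventq_mask is size - 1,
 * so read pointers are free-running and masked on use rather than
 * wrapped explicitly; e.g. with 512 entries, index 515 selects slot 3.
 */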
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
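/* For illustration (example sizes only): each buffer table entry maps
 * one EFX_BUF_SIZE (4K) page, so a 1024-entry descriptor ring of
 * 8-byte qwords (8KB) consumes two consecutive buffer IDs starting at
 * efx->next_buffer_table.
 */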
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr,
					  gfp_flags | __GFP_ZERO);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}
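/* The push criterion above (as this reviewer reads it):
 * empty_read_count records, tagged with EFX_EMPTY_COUNT_VALID, the
 * count at which the completion path last saw the queue empty.  We
 * push a descriptor with the doorbell write only when the queue was
 * empty at the current write position and exactly one descriptor is
 * being added; otherwise a plain doorbell write is used.
 */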
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
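/* Ask the hardware to flush a TX queue.  Completion is signalled by a
 * TX_DESCQ_FLS_DONE driver event; flush_outstanding ensures only one
 * flush is in flight per queue.
 */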
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
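/* Flush bookkeeping: drain_pending counts TX and RX queues still
 * expecting a drain event, rxq_flush_pending counts RX queues waiting
 * to have a flush issued, and rxq_flush_outstanding counts RX flushes
 * in flight (at most EFX_RX_FLUSH_COUNT).
 */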
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_nic_fini_rx(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_nic_fini_tx(tx_queue);
		}
	}

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
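/* The RPTR register offset scales with the channel number: channel
 * N's register lives at evq_rptr_tbl_base + N * FR_BZ_EVQ_RPTR_STEP.
 */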
/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
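/* Batching example: if read_count is 100 and the event carries
 * descriptor pointer 110, the masked subtraction above credits ten
 * completed descriptors to this single event.
 */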
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
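/* Flush-done events carry the queue id in the event's SUBDATA field:
 * TX queue ids decompose as channel = qid / EFX_TXQ_TYPES plus
 * type = qid % EFX_TXQ_TYPES, while RX queue ids index channels
 * directly (one RX queue per channel).
 */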
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here
		 */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
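/* Process this channel's event queue, up to a NAPI budget of RX
 * events.  Only RX events count against the budget; TX completion
 * events are bounded separately by the TX ring size.  Returns the
 * number of RX events processed.
 */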
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				if (queues & 1)
					efx_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			efx_for_each_channel(channel, efx) {
				event = efx_event(channel,
						  channel->eventq_read_ptr);
				if (efx_event_present(event))
					efx_schedule_channel_irq(channel);
				else
					efx_nic_eventq_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	return 0;

fail2:
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
fail1:
	return rc;
}
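/* Unhook the interrupt handlers hooked by efx_nic_init_interrupt() */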
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->msi_context[channel->channel]);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
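/* Worked example of efx_nic_dimension_resources() above, assuming
 * for illustration the values EFX_MAX_DMAQ_SIZE == 4096,
 * EFX_MAX_EVQ_SIZE == 16384, EFX_TXQ_TYPES == 4 and
 * EFX_BUF_SIZE == 4096 (check net_driver.h for the real constants).
 * With four channels, each providing one RX queue and EFX_TXQ_TYPES
 * TX queues:
 *
 *	buftbl_min = (4 * 4096 + 4 * 4 * 4096 + 4 * 16384) qwords
 *	           = 147456 qwords * 8 bytes / 4096
 *	           = 288 buffer table entries
 *
 * so the first 288 entries back the channels, and the RX and TX
 * descriptor caches are carved from the top of SRAM downwards.
 */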
void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin.
				      */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
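/* Worked example of the encodings above: the DC size fields hold
 * log2(entries / 8), so a 16-entry TX descriptor cache is written as
 * order 1 (16 == 8 << 1) and a 64-entry RX cache as order 3
 * (64 == 8 << 3), which is exactly what the BUILD_BUG_ON()s verify.
 * The RX prefetch low watermark is then programmed as entries - 8.
 */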
/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
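/* For example, REGISTER_BZ(RX_FILTER_CTL) above expands via
 * REGISTER(RX_FILTER_CTL, B, Z) to
 *
 *	{ FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 *
 * i.e. { FR_BZ_RX_FILTER_CTL, 2, 3 }, so the register is included in
 * a dump only for revision B0 and later NICs.
 */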
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
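/* For example, REGISTER_TABLE_BB_CZ(TX_PACE_TBL) above emits two
 * entries that share the FR_BZ_ offset and step but carry different
 * row counts, allowing the table size to differ between the B and
 * C-onwards revision ranges:
 *
 *	{ FR_BZ_TX_PACE_TBL, 2, 2,
 *	  FR_BZ_TX_PACE_TBL_STEP, FR_BB_TX_PACE_TBL_ROWS },
 *	{ FR_BZ_TX_PACE_TBL, 3, 3,
 *	  FR_BZ_TX_PACE_TBL_STEP, FR_CZ_TX_PACE_TBL_ROWS },
 */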
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
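/* Illustrative caller sketch, not part of this file: an ethtool
 * get_regs implementation would size the dump buffer with
 * efx_nic_get_regs_len() and fill it with efx_nic_get_regs(). The
 * function below is hypothetical and assumes <linux/ethtool.h> for
 * struct ethtool_regs; only the two helpers above are real.
 */
static inline void efx_example_ethtool_get_regs(struct efx_nic *efx,
						struct ethtool_regs *regs,
						void *buf)
{
	/* buf must be at least efx_nic_get_regs_len(efx) bytes */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}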