1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include "net_driver.h" 18 #include "bitfield.h" 19 #include "efx.h" 20 #include "nic.h" 21 #include "regs.h" 22 #include "io.h" 23 #include "workarounds.h" 24 25 /************************************************************************** 26 * 27 * Configurable values 28 * 29 ************************************************************************** 30 */ 31 32 /* This is set to 16 for a good reason. In summary, if larger than 33 * 16, the descriptor cache holds more than a default socket 34 * buffer's worth of packets (for UDP we can only have at most one 35 * socket buffer's worth outstanding). This combined with the fact 36 * that we only get 1 TX event per descriptor cache means the NIC 37 * goes idle. 38 */ 39 #define TX_DC_ENTRIES 16 40 #define TX_DC_ENTRIES_ORDER 1 41 42 #define RX_DC_ENTRIES 64 43 #define RX_DC_ENTRIES_ORDER 3 44 45 /* If EFX_MAX_INT_ERRORS internal errors occur within 46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 47 * disable it. 48 */ 49 #define EFX_INT_ERROR_EXPIRE 3600 50 #define EFX_MAX_INT_ERRORS 5 51 52 /* Depth of RX flush request fifo */ 53 #define EFX_RX_FLUSH_COUNT 4 54 55 /* Driver generated events */ 56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101 57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102 58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 60 61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) 62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) 63 64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) 66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ 67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ 68 efx_rx_queue_index(_rx_queue)) 69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ 70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ 71 efx_rx_queue_index(_rx_queue)) 72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ 73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ 74 (_tx_queue)->queue) 75 76 /************************************************************************** 77 * 78 * Solarstorm hardware access 79 * 80 **************************************************************************/ 81 82 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 83 unsigned int index) 84 { 85 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 86 value, index); 87 } 88 89 /* Read the current event from the event queue */ 90 static inline efx_qword_t *efx_event(struct efx_channel *channel, 91 unsigned int index) 92 { 93 return ((efx_qword_t *) (channel->eventq.addr)) + 94 (index & channel->eventq_mask); 95 } 96 97 /* See if an event is present 98 * 99 * We check both the high and low dword of the event for all ones. We 100 * wrote all ones when we cleared the event, and no valid event can 101 * have all ones in either its high or low dwords. 
This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
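 *
 * Each EFX_BUF_SIZE (4K) page of the buffer gets its own buffer table
 * entry; the queue registers are then programmed with the first buffer
 * ID and the hardware addresses the rest of the ring relative to it.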
190 */ 191 static void 192 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 193 { 194 efx_qword_t buf_desc; 195 int index; 196 dma_addr_t dma_addr; 197 int i; 198 199 EFX_BUG_ON_PARANOID(!buffer->addr); 200 201 /* Write buffer descriptors to NIC */ 202 for (i = 0; i < buffer->entries; i++) { 203 index = buffer->index + i; 204 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE); 205 netif_dbg(efx, probe, efx->net_dev, 206 "mapping special buffer %d at %llx\n", 207 index, (unsigned long long)dma_addr); 208 EFX_POPULATE_QWORD_3(buf_desc, 209 FRF_AZ_BUF_ADR_REGION, 0, 210 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 211 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 212 efx_write_buf_tbl(efx, &buf_desc, index); 213 } 214 } 215 216 /* Unmaps a buffer and clears the buffer table entries */ 217 static void 218 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 219 { 220 efx_oword_t buf_tbl_upd; 221 unsigned int start = buffer->index; 222 unsigned int end = (buffer->index + buffer->entries - 1); 223 224 if (!buffer->entries) 225 return; 226 227 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 228 buffer->index, buffer->index + buffer->entries - 1); 229 230 EFX_POPULATE_OWORD_4(buf_tbl_upd, 231 FRF_AZ_BUF_UPD_CMD, 0, 232 FRF_AZ_BUF_CLR_CMD, 1, 233 FRF_AZ_BUF_CLR_END_ID, end, 234 FRF_AZ_BUF_CLR_START_ID, start); 235 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 236 } 237 238 /* 239 * Allocate a new special buffer 240 * 241 * This allocates memory for a new buffer, clears it and allocates a 242 * new buffer ID range. It does not write into the buffer table. 243 * 244 * This call will allocate 4KB buffers, since 8KB buffers can't be 245 * used for event queues and descriptor rings. 246 */ 247 static int efx_alloc_special_buffer(struct efx_nic *efx, 248 struct efx_special_buffer *buffer, 249 unsigned int len) 250 { 251 len = ALIGN(len, EFX_BUF_SIZE); 252 253 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 254 &buffer->dma_addr, GFP_KERNEL); 255 if (!buffer->addr) 256 return -ENOMEM; 257 buffer->len = len; 258 buffer->entries = len / EFX_BUF_SIZE; 259 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 260 261 /* All zeros is a potentially valid event so memset to 0xff */ 262 memset(buffer->addr, 0xff, len); 263 264 /* Select new buffer ID */ 265 buffer->index = efx->next_buffer_table; 266 efx->next_buffer_table += buffer->entries; 267 268 netif_dbg(efx, probe, efx->net_dev, 269 "allocating special buffers %d-%d at %llx+%x " 270 "(virt %p phys %llx)\n", buffer->index, 271 buffer->index + buffer->entries - 1, 272 (u64)buffer->dma_addr, len, 273 buffer->addr, (u64)virt_to_phys(buffer->addr)); 274 275 return 0; 276 } 277 278 static void 279 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 280 { 281 if (!buffer->addr) 282 return; 283 284 netif_dbg(efx, hw, efx->net_dev, 285 "deallocating special buffers %d-%d at %llx+%x " 286 "(virt %p phys %llx)\n", buffer->index, 287 buffer->index + buffer->entries - 1, 288 (u64)buffer->dma_addr, buffer->len, 289 buffer->addr, (u64)virt_to_phys(buffer->addr)); 290 291 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, 292 buffer->dma_addr); 293 buffer->addr = NULL; 294 buffer->entries = 0; 295 } 296 297 /************************************************************************** 298 * 299 * Generic buffer handling 300 * These buffers are used for interrupt status and MAC stats 301 * 302 **************************************************************************/ 303 
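/* Unlike special buffers, these are plain coherent DMA allocations with
 * no buffer table entry; the NIC is given the bus address directly, e.g.
 * the interrupt status word programmed into FR_AZ_INT_ADR_KER and the
 * MAC stats DMA buffer.  A minimal usage sketch (illustrative only):
 *
 *	struct efx_buffer buf;
 *
 *	if (efx_nic_alloc_buffer(efx, &buf, 1024) == 0) {
 *		// hand buf.dma_addr to the NIC, inspect buf.addr ...
 *		efx_nic_free_buffer(efx, &buf);
 *	}
 */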
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
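 *
 * If the queue was completely empty when it was last serviced (see
 * efx_may_push_tx_desc()), the first descriptor is instead pushed
 * inline with the write pointer through the TX_DESC_UPD register,
 * saving the NIC a separate descriptor fetch for an idle queue.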
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
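 *
 * Hardware RX descriptors are only built here, immediately before the
 * doorbell write, for every buffer added since the previous notify
 * (rx_queue->notified_count up to rx_queue->added_count).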
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
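 *
 * Three counters drive this: drain_pending counts TX and RX queues whose
 * drain events have not yet been seen; rxq_flush_pending counts RX queues
 * still waiting for a flush request to be issued; rxq_flush_outstanding
 * counts flush requests issued to the hardware but not yet completed
 * (limited to EFX_RX_FLUSH_COUNT at a time).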
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit.
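 *
 * Returns the receive flags for this packet: frames with CRC errors,
 * truncation, dribble nibbles, pause frames or a set TOBE_DISC bit are
 * marked EFX_RX_PKT_DISCARD.  IP header and TCP/UDP checksum failures
 * are only counted, and not even that during the loopback self-test.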
*/ 832 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 833 const efx_qword_t *event) 834 { 835 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 836 struct efx_nic *efx = rx_queue->efx; 837 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 838 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 839 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 840 bool rx_ev_other_err, rx_ev_pause_frm; 841 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 842 unsigned rx_ev_pkt_type; 843 844 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 845 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 846 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 847 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 848 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 849 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 850 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 851 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 852 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 853 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 854 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 855 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 856 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 857 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 858 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 859 860 /* Every error apart from tobe_disc and pause_frm */ 861 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 862 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 863 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 864 865 /* Count errors that are not in MAC stats. Ignore expected 866 * checksum errors during self-test. */ 867 if (rx_ev_frm_trunc) 868 ++channel->n_rx_frm_trunc; 869 else if (rx_ev_tobe_disc) 870 ++channel->n_rx_tobe_disc; 871 else if (!efx->loopback_selftest) { 872 if (rx_ev_ip_hdr_chksum_err) 873 ++channel->n_rx_ip_hdr_chksum_err; 874 else if (rx_ev_tcp_udp_chksum_err) 875 ++channel->n_rx_tcp_udp_chksum_err; 876 } 877 878 /* TOBE_DISC is expected on unicast mismatches; don't print out an 879 * error message. FRM_TRUNC indicates RXDP dropped the packet due 880 * to a FIFO overflow. 881 */ 882 #ifdef DEBUG 883 if (rx_ev_other_err && net_ratelimit()) { 884 netif_dbg(efx, rx_err, efx->net_dev, 885 " RX queue %d unexpected RX event " 886 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 887 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 888 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 889 rx_ev_ip_hdr_chksum_err ? 890 " [IP_HDR_CHKSUM_ERR]" : "", 891 rx_ev_tcp_udp_chksum_err ? 892 " [TCP_UDP_CHKSUM_ERR]" : "", 893 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 894 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 895 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 896 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 897 rx_ev_pause_frm ? " [PAUSE]" : ""); 898 } 899 #endif 900 901 /* The frame must be discarded if any of these are true. */ 902 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 903 rx_ev_tobe_disc | rx_ev_pause_frm) ? 904 EFX_RX_PKT_DISCARD : 0; 905 } 906 907 /* Handle receive events that are not in-order. 
*/ 908 static void 909 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 910 { 911 struct efx_nic *efx = rx_queue->efx; 912 unsigned expected, dropped; 913 914 expected = rx_queue->removed_count & rx_queue->ptr_mask; 915 dropped = (index - expected) & rx_queue->ptr_mask; 916 netif_info(efx, rx_err, efx->net_dev, 917 "dropped %d events (index=%d expected=%d)\n", 918 dropped, index, expected); 919 920 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 921 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 922 } 923 924 /* Handle a packet received event 925 * 926 * The NIC gives a "discard" flag if it's a unicast packet with the 927 * wrong destination address 928 * Also "is multicast" and "matches multicast filter" flags can be used to 929 * discard non-matching multicast packets. 930 */ 931 static void 932 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 933 { 934 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 935 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 936 unsigned expected_ptr; 937 bool rx_ev_pkt_ok; 938 u16 flags; 939 struct efx_rx_queue *rx_queue; 940 struct efx_nic *efx = channel->efx; 941 942 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 943 return; 944 945 /* Basic packet information */ 946 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 947 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 948 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 949 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); 950 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); 951 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 952 channel->channel); 953 954 rx_queue = efx_channel_get_rx_queue(channel); 955 956 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 957 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 958 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 959 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 960 961 if (likely(rx_ev_pkt_ok)) { 962 /* If packet is marked as OK and packet type is TCP/IP or 963 * UDP/IP, then we can rely on the hardware checksum. 964 */ 965 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 966 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? 967 EFX_RX_PKT_CSUMMED : 0; 968 } else { 969 flags = efx_handle_rx_not_ok(rx_queue, event); 970 } 971 972 /* Detect multicast packets that didn't match the filter */ 973 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 974 if (rx_ev_mcast_pkt) { 975 unsigned int rx_ev_mcast_hash_match = 976 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 977 978 if (unlikely(!rx_ev_mcast_hash_match)) { 979 ++channel->n_rx_mcast_mismatch; 980 flags |= EFX_RX_PKT_DISCARD; 981 } 982 } 983 984 channel->irq_mod_score += 2; 985 986 /* Handle received packet */ 987 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); 988 } 989 990 /* If this flush done event corresponds to a &struct efx_tx_queue, then 991 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 992 * of all transmit completions. 
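 *
 * The drain event travels through the same event queue as the TX
 * completions, so once it is seen no completion for this queue can
 * still be outstanding.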
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);

		efx_magic_event(tx_queue->channel,
				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		/* ignore */
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.
Refill it here */ 1074 efx_fast_push_rx_descriptors(rx_queue); 1075 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 1076 rx_queue->enabled = false; 1077 efx_handle_drain_event(channel); 1078 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 1079 efx_handle_drain_event(channel); 1080 } else { 1081 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1082 "generated event "EFX_QWORD_FMT"\n", 1083 channel->channel, EFX_QWORD_VAL(*event)); 1084 } 1085 } 1086 1087 static void 1088 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1089 { 1090 struct efx_nic *efx = channel->efx; 1091 unsigned int ev_sub_code; 1092 unsigned int ev_sub_data; 1093 1094 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1095 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1096 1097 switch (ev_sub_code) { 1098 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1099 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1100 channel->channel, ev_sub_data); 1101 efx_handle_tx_flush_done(efx, event); 1102 break; 1103 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1104 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1105 channel->channel, ev_sub_data); 1106 efx_handle_rx_flush_done(efx, event); 1107 break; 1108 case FSE_AZ_EVQ_INIT_DONE_EV: 1109 netif_dbg(efx, hw, efx->net_dev, 1110 "channel %d EVQ %d initialised\n", 1111 channel->channel, ev_sub_data); 1112 break; 1113 case FSE_AZ_SRM_UPD_DONE_EV: 1114 netif_vdbg(efx, hw, efx->net_dev, 1115 "channel %d SRAM update done\n", channel->channel); 1116 break; 1117 case FSE_AZ_WAKE_UP_EV: 1118 netif_vdbg(efx, hw, efx->net_dev, 1119 "channel %d RXQ %d wakeup event\n", 1120 channel->channel, ev_sub_data); 1121 break; 1122 case FSE_AZ_TIMER_EV: 1123 netif_vdbg(efx, hw, efx->net_dev, 1124 "channel %d RX queue %d timer expired\n", 1125 channel->channel, ev_sub_data); 1126 break; 1127 case FSE_AA_RX_RECOVER_EV: 1128 netif_err(efx, rx_err, efx->net_dev, 1129 "channel %d seen DRIVER RX_RESET event. " 1130 "Resetting.\n", channel->channel); 1131 atomic_inc(&efx->rx_reset); 1132 efx_schedule_reset(efx, 1133 EFX_WORKAROUND_6555(efx) ? 1134 RESET_TYPE_RX_RECOVERY : 1135 RESET_TYPE_DISABLE); 1136 break; 1137 case FSE_BZ_RX_DSC_ERROR_EV: 1138 netif_err(efx, rx_err, efx->net_dev, 1139 "RX DMA Q %d reports descriptor fetch error." 1140 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1141 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1142 break; 1143 case FSE_BZ_TX_DSC_ERROR_EV: 1144 netif_err(efx, tx_err, efx->net_dev, 1145 "TX DMA Q %d reports descriptor fetch error." 
1146 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 1147 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1148 break; 1149 default: 1150 netif_vdbg(efx, hw, efx->net_dev, 1151 "channel %d unknown driver event code %d " 1152 "data %04x\n", channel->channel, ev_sub_code, 1153 ev_sub_data); 1154 break; 1155 } 1156 } 1157 1158 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1159 { 1160 struct efx_nic *efx = channel->efx; 1161 unsigned int read_ptr; 1162 efx_qword_t event, *p_event; 1163 int ev_code; 1164 int tx_packets = 0; 1165 int spent = 0; 1166 1167 read_ptr = channel->eventq_read_ptr; 1168 1169 for (;;) { 1170 p_event = efx_event(channel, read_ptr); 1171 event = *p_event; 1172 1173 if (!efx_event_present(&event)) 1174 /* End of events */ 1175 break; 1176 1177 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1178 "channel %d event is "EFX_QWORD_FMT"\n", 1179 channel->channel, EFX_QWORD_VAL(event)); 1180 1181 /* Clear this event by marking it all ones */ 1182 EFX_SET_QWORD(*p_event); 1183 1184 ++read_ptr; 1185 1186 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1187 1188 switch (ev_code) { 1189 case FSE_AZ_EV_CODE_RX_EV: 1190 efx_handle_rx_event(channel, &event); 1191 if (++spent == budget) 1192 goto out; 1193 break; 1194 case FSE_AZ_EV_CODE_TX_EV: 1195 tx_packets += efx_handle_tx_event(channel, &event); 1196 if (tx_packets > efx->txq_entries) { 1197 spent = budget; 1198 goto out; 1199 } 1200 break; 1201 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1202 efx_handle_generated_event(channel, &event); 1203 break; 1204 case FSE_AZ_EV_CODE_DRIVER_EV: 1205 efx_handle_driver_event(channel, &event); 1206 break; 1207 case FSE_CZ_EV_CODE_MCDI_EV: 1208 efx_mcdi_process_event(channel, &event); 1209 break; 1210 case FSE_AZ_EV_CODE_GLOBAL_EV: 1211 if (efx->type->handle_global_event && 1212 efx->type->handle_global_event(channel, &event)) 1213 break; 1214 /* else fall through */ 1215 default: 1216 netif_err(channel->efx, hw, channel->efx->net_dev, 1217 "channel %d unknown event type %d (data " 1218 EFX_QWORD_FMT ")\n", channel->channel, 1219 ev_code, EFX_QWORD_VAL(event)); 1220 } 1221 } 1222 1223 out: 1224 channel->eventq_read_ptr = read_ptr; 1225 return spent; 1226 } 1227 1228 /* Check whether an event is present in the eventq at the current 1229 * read pointer. Only useful for self-test. 
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
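 * Two handlers are provided: a legacy (INTx) handler which reads the
 * ISR to discover which queues interrupted, and a per-channel MSI
 * handler.  Fatal "SYSTEM ERROR" interrupts are signalled on the
 * efx->irq_level vector and handed to efx_nic_fatal_interrupt().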
1321 * 1322 **************************************************************************/ 1323 1324 /* Enable/disable/generate interrupts */ 1325 static inline void efx_nic_interrupts(struct efx_nic *efx, 1326 bool enabled, bool force) 1327 { 1328 efx_oword_t int_en_reg_ker; 1329 1330 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1331 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1332 FRF_AZ_KER_INT_KER, force, 1333 FRF_AZ_DRV_INT_EN_KER, enabled); 1334 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1335 } 1336 1337 void efx_nic_enable_interrupts(struct efx_nic *efx) 1338 { 1339 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1340 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1341 1342 efx_nic_interrupts(efx, true, false); 1343 } 1344 1345 void efx_nic_disable_interrupts(struct efx_nic *efx) 1346 { 1347 /* Disable interrupts */ 1348 efx_nic_interrupts(efx, false, false); 1349 } 1350 1351 /* Generate a test interrupt 1352 * Interrupt must already have been enabled, otherwise nasty things 1353 * may happen. 1354 */ 1355 void efx_nic_generate_interrupt(struct efx_nic *efx) 1356 { 1357 efx_nic_interrupts(efx, true, true); 1358 } 1359 1360 /* Process a fatal interrupt 1361 * Disable bus mastering ASAP and schedule a reset 1362 */ 1363 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1364 { 1365 struct falcon_nic_data *nic_data = efx->nic_data; 1366 efx_oword_t *int_ker = efx->irq_status.addr; 1367 efx_oword_t fatal_intr; 1368 int error, mem_perr; 1369 1370 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1371 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1372 1373 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1374 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1375 EFX_OWORD_VAL(fatal_intr), 1376 error ? "disabling bus mastering" : "no recognised error"); 1377 1378 /* If this is a memory parity error dump which blocks are offending */ 1379 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1380 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1381 if (mem_perr) { 1382 efx_oword_t reg; 1383 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1384 netif_err(efx, hw, efx->net_dev, 1385 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1386 EFX_OWORD_VAL(reg)); 1387 } 1388 1389 /* Disable both devices */ 1390 pci_clear_master(efx->pci_dev); 1391 if (efx_nic_is_dual_func(efx)) 1392 pci_clear_master(nic_data->pci_dev2); 1393 efx_nic_disable_interrupts(efx); 1394 1395 /* Count errors and reset or disable the NIC accordingly */ 1396 if (efx->int_error_count == 0 || 1397 time_after(jiffies, efx->int_error_expire)) { 1398 efx->int_error_count = 0; 1399 efx->int_error_expire = 1400 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1401 } 1402 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1403 netif_err(efx, hw, efx->net_dev, 1404 "SYSTEM ERROR - reset scheduled\n"); 1405 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1406 } else { 1407 netif_err(efx, hw, efx->net_dev, 1408 "SYSTEM ERROR - max number of errors seen." 1409 "NIC will be disabled\n"); 1410 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1411 } 1412 1413 return IRQ_HANDLED; 1414 } 1415 1416 /* Handle a legacy interrupt 1417 * Acknowledges the interrupt and schedule event queue processing. 
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
1635 */ 1636 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1637 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1638 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1639 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1640 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1641 1642 /* Program INT_KER address */ 1643 EFX_POPULATE_OWORD_2(temp, 1644 FRF_AZ_NORM_INT_VEC_DIS_KER, 1645 EFX_INT_MODE_USE_MSI(efx), 1646 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1647 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1648 1649 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1650 /* Use an interrupt level unused by event queues */ 1651 efx->irq_level = 0x1f; 1652 else 1653 /* Use a valid MSI-X vector */ 1654 efx->irq_level = 0; 1655 1656 /* Enable all the genuinely fatal interrupts. (They are still 1657 * masked by the overall interrupt mask, controlled by 1658 * falcon_interrupts()). 1659 * 1660 * Note: All other fatal interrupts are enabled 1661 */ 1662 EFX_POPULATE_OWORD_3(temp, 1663 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1664 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1665 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1666 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1667 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1668 EFX_INVERT_OWORD(temp); 1669 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1670 1671 efx_nic_push_rx_indir_table(efx); 1672 1673 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1674 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 1675 */ 1676 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1677 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1678 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1679 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1680 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1681 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1682 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1683 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1684 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1685 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1686 /* Disable hardware watchdog which can misfire */ 1687 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1688 /* Squash TX of packets of 16 bytes or less */ 1689 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1690 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1691 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1692 1693 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1694 EFX_POPULATE_OWORD_4(temp, 1695 /* Default values */ 1696 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1697 FRF_BZ_TX_PACE_SB_AF, 0xb, 1698 FRF_BZ_TX_PACE_FB_BASE, 0, 1699 /* Allow large pace values in the 1700 * fast bin. 
*/ 1701 FRF_BZ_TX_PACE_BIN_TH, 1702 FFE_BZ_TX_PACE_RESERVED); 1703 efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1704 } 1705 } 1706 1707 /* Register dump */ 1708 1709 #define REGISTER_REVISION_A 1 1710 #define REGISTER_REVISION_B 2 1711 #define REGISTER_REVISION_C 3 1712 #define REGISTER_REVISION_Z 3 /* latest revision */ 1713 1714 struct efx_nic_reg { 1715 u32 offset:24; 1716 u32 min_revision:2, max_revision:2; 1717 }; 1718 1719 #define REGISTER(name, min_rev, max_rev) { \ 1720 FR_ ## min_rev ## max_rev ## _ ## name, \ 1721 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 1722 } 1723 #define REGISTER_AA(name) REGISTER(name, A, A) 1724 #define REGISTER_AB(name) REGISTER(name, A, B) 1725 #define REGISTER_AZ(name) REGISTER(name, A, Z) 1726 #define REGISTER_BB(name) REGISTER(name, B, B) 1727 #define REGISTER_BZ(name) REGISTER(name, B, Z) 1728 #define REGISTER_CZ(name) REGISTER(name, C, Z) 1729 1730 static const struct efx_nic_reg efx_nic_regs[] = { 1731 REGISTER_AZ(ADR_REGION), 1732 REGISTER_AZ(INT_EN_KER), 1733 REGISTER_BZ(INT_EN_CHAR), 1734 REGISTER_AZ(INT_ADR_KER), 1735 REGISTER_BZ(INT_ADR_CHAR), 1736 /* INT_ACK_KER is WO */ 1737 /* INT_ISR0 is RC */ 1738 REGISTER_AZ(HW_INIT), 1739 REGISTER_CZ(USR_EV_CFG), 1740 REGISTER_AB(EE_SPI_HCMD), 1741 REGISTER_AB(EE_SPI_HADR), 1742 REGISTER_AB(EE_SPI_HDATA), 1743 REGISTER_AB(EE_BASE_PAGE), 1744 REGISTER_AB(EE_VPD_CFG0), 1745 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ 1746 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ 1747 /* PCIE_CORE_INDIRECT is indirect */ 1748 REGISTER_AB(NIC_STAT), 1749 REGISTER_AB(GPIO_CTL), 1750 REGISTER_AB(GLB_CTL), 1751 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ 1752 REGISTER_BZ(DP_CTRL), 1753 REGISTER_AZ(MEM_STAT), 1754 REGISTER_AZ(CS_DEBUG), 1755 REGISTER_AZ(ALTERA_BUILD), 1756 REGISTER_AZ(CSR_SPARE), 1757 REGISTER_AB(PCIE_SD_CTL0123), 1758 REGISTER_AB(PCIE_SD_CTL45), 1759 REGISTER_AB(PCIE_PCS_CTL_STAT), 1760 /* DEBUG_DATA_OUT is not used */ 1761 /* DRV_EV is WO */ 1762 REGISTER_AZ(EVQ_CTL), 1763 REGISTER_AZ(EVQ_CNT1), 1764 REGISTER_AZ(EVQ_CNT2), 1765 REGISTER_AZ(BUF_TBL_CFG), 1766 REGISTER_AZ(SRM_RX_DC_CFG), 1767 REGISTER_AZ(SRM_TX_DC_CFG), 1768 REGISTER_AZ(SRM_CFG), 1769 /* BUF_TBL_UPD is WO */ 1770 REGISTER_AZ(SRM_UPD_EVQ), 1771 REGISTER_AZ(SRAM_PARITY), 1772 REGISTER_AZ(RX_CFG), 1773 REGISTER_BZ(RX_FILTER_CTL), 1774 /* RX_FLUSH_DESCQ is WO */ 1775 REGISTER_AZ(RX_DC_CFG), 1776 REGISTER_AZ(RX_DC_PF_WM), 1777 REGISTER_BZ(RX_RSS_TKEY), 1778 /* RX_NODESC_DROP is RC */ 1779 REGISTER_AA(RX_SELF_RST), 1780 /* RX_DEBUG, RX_PUSH_DROP are not used */ 1781 REGISTER_CZ(RX_RSS_IPV6_REG1), 1782 REGISTER_CZ(RX_RSS_IPV6_REG2), 1783 REGISTER_CZ(RX_RSS_IPV6_REG3), 1784 /* TX_FLUSH_DESCQ is WO */ 1785 REGISTER_AZ(TX_DC_CFG), 1786 REGISTER_AA(TX_CHKSM_CFG), 1787 REGISTER_AZ(TX_CFG), 1788 /* TX_PUSH_DROP is not used */ 1789 REGISTER_AZ(TX_RESERVED), 1790 REGISTER_BZ(TX_PACE), 1791 /* TX_PACE_DROP_QID is RC */ 1792 REGISTER_BB(TX_VLAN), 1793 REGISTER_BZ(TX_IPFIL_PORTEN), 1794 REGISTER_AB(MD_TXD), 1795 REGISTER_AB(MD_RXD), 1796 REGISTER_AB(MD_CS), 1797 REGISTER_AB(MD_PHY_ADR), 1798 REGISTER_AB(MD_ID), 1799 /* MD_STAT is RC */ 1800 REGISTER_AB(MAC_STAT_DMA), 1801 REGISTER_AB(MAC_CTRL), 1802 REGISTER_BB(GEN_MODE), 1803 REGISTER_AB(MAC_MC_HASH_REG0), 1804 REGISTER_AB(MAC_MC_HASH_REG1), 1805 REGISTER_AB(GM_CFG1), 1806 REGISTER_AB(GM_CFG2), 1807 /* GM_IPG and GM_HD are not used */ 1808 REGISTER_AB(GM_MAX_FLEN), 1809 /* GM_TEST is not used */ 1810 REGISTER_AB(GM_ADR1), 1811 REGISTER_AB(GM_ADR2), 1812 
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

/* Each entry describes one register table: its offset, the revisions
 * for which it is present, the byte step between rows and the number
 * of rows to read for the register dump.
 */
struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) {	\
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

/* Length in bytes of a register dump for the current NIC revision */
size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

/* Dump all readable registers and register tables into buf, which must
 * be at least efx_nic_get_regs_len(efx) bytes long.
 */
void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
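
/* Illustrative only: a minimal sketch, kept out of the build, of how an
 * ethtool .get_regs_len/.get_regs pair might drive the register dump
 * helpers above.  The hook names below are hypothetical (the real hooks
 * would normally live in the ethtool support file); the ethtool_ops
 * signatures and netdev_priv() are standard.  The buffer passed to
 * efx_nic_get_regs() must be sized by efx_nic_get_regs_len().
 */
#if 0	/* sketch, not compiled */
static int efx_example_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx_nic_get_regs_len(efx);
}

static void efx_example_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Report the hardware revision so userspace can decode the dump */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}
#endif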