1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include "net_driver.h" 18 #include "bitfield.h" 19 #include "efx.h" 20 #include "nic.h" 21 #include "regs.h" 22 #include "io.h" 23 #include "workarounds.h" 24 25 /************************************************************************** 26 * 27 * Configurable values 28 * 29 ************************************************************************** 30 */ 31 32 /* This is set to 16 for a good reason. In summary, if larger than 33 * 16, the descriptor cache holds more than a default socket 34 * buffer's worth of packets (for UDP we can only have at most one 35 * socket buffer's worth outstanding). This combined with the fact 36 * that we only get 1 TX event per descriptor cache means the NIC 37 * goes idle. 38 */ 39 #define TX_DC_ENTRIES 16 40 #define TX_DC_ENTRIES_ORDER 1 41 42 #define RX_DC_ENTRIES 64 43 #define RX_DC_ENTRIES_ORDER 3 44 45 /* If EFX_MAX_INT_ERRORS internal errors occur within 46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 47 * disable it. 48 */ 49 #define EFX_INT_ERROR_EXPIRE 3600 50 #define EFX_MAX_INT_ERRORS 5 51 52 /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times 53 */ 54 #define EFX_FLUSH_INTERVAL 10 55 #define EFX_FLUSH_POLL_COUNT 100 56 57 /* Depth of RX flush request fifo */ 58 #define EFX_RX_FLUSH_COUNT 4 59 60 /* Driver generated events */ 61 #define _EFX_CHANNEL_MAGIC_TEST 0x000101 62 #define _EFX_CHANNEL_MAGIC_FILL 0x000102 63 64 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) 65 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) 66 67 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 68 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) 69 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ 70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ 71 efx_rx_queue_index(_rx_queue)) 72 73 /************************************************************************** 74 * 75 * Solarstorm hardware access 76 * 77 **************************************************************************/ 78 79 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 80 unsigned int index) 81 { 82 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 83 value, index); 84 } 85 86 /* Read the current event from the event queue */ 87 static inline efx_qword_t *efx_event(struct efx_channel *channel, 88 unsigned int index) 89 { 90 return ((efx_qword_t *) (channel->eventq.addr)) + 91 (index & channel->eventq_mask); 92 } 93 94 /* See if an event is present 95 * 96 * We check both the high and low dword of the event for all ones. We 97 * wrote all ones when we cleared the event, and no valid event can 98 * have all ones in either its high or low dwords. This approach is 99 * robust against reordering. 100 * 101 * Note that using a single 64-bit comparison is incorrect; even 102 * though the CPU read will be atomic, the DMA write may not be. 
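 *
 * Concretely: a freshly cleared entry has both dwords all-ones, so
 * efx_event_present() reports it as absent; a valid event has neither
 * dword all-ones, so it is reported as present; and a partially
 * written event still has one all-ones dword, so it is not reported
 * as present until the whole write has landed.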
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
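 *
 * Each EFX_BUF_SIZE (4K) page of the buffer gets its own buffer table
 * entry, programmed with the page's DMA address shifted right by 12
 * bits and owner ID 0.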
187 */ 188 static void 189 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 190 { 191 efx_qword_t buf_desc; 192 int index; 193 dma_addr_t dma_addr; 194 int i; 195 196 EFX_BUG_ON_PARANOID(!buffer->addr); 197 198 /* Write buffer descriptors to NIC */ 199 for (i = 0; i < buffer->entries; i++) { 200 index = buffer->index + i; 201 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE); 202 netif_dbg(efx, probe, efx->net_dev, 203 "mapping special buffer %d at %llx\n", 204 index, (unsigned long long)dma_addr); 205 EFX_POPULATE_QWORD_3(buf_desc, 206 FRF_AZ_BUF_ADR_REGION, 0, 207 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 208 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 209 efx_write_buf_tbl(efx, &buf_desc, index); 210 } 211 } 212 213 /* Unmaps a buffer and clears the buffer table entries */ 214 static void 215 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 216 { 217 efx_oword_t buf_tbl_upd; 218 unsigned int start = buffer->index; 219 unsigned int end = (buffer->index + buffer->entries - 1); 220 221 if (!buffer->entries) 222 return; 223 224 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 225 buffer->index, buffer->index + buffer->entries - 1); 226 227 EFX_POPULATE_OWORD_4(buf_tbl_upd, 228 FRF_AZ_BUF_UPD_CMD, 0, 229 FRF_AZ_BUF_CLR_CMD, 1, 230 FRF_AZ_BUF_CLR_END_ID, end, 231 FRF_AZ_BUF_CLR_START_ID, start); 232 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 233 } 234 235 /* 236 * Allocate a new special buffer 237 * 238 * This allocates memory for a new buffer, clears it and allocates a 239 * new buffer ID range. It does not write into the buffer table. 240 * 241 * This call will allocate 4KB buffers, since 8KB buffers can't be 242 * used for event queues and descriptor rings. 243 */ 244 static int efx_alloc_special_buffer(struct efx_nic *efx, 245 struct efx_special_buffer *buffer, 246 unsigned int len) 247 { 248 len = ALIGN(len, EFX_BUF_SIZE); 249 250 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 251 &buffer->dma_addr, GFP_KERNEL); 252 if (!buffer->addr) 253 return -ENOMEM; 254 buffer->len = len; 255 buffer->entries = len / EFX_BUF_SIZE; 256 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 257 258 /* All zeros is a potentially valid event so memset to 0xff */ 259 memset(buffer->addr, 0xff, len); 260 261 /* Select new buffer ID */ 262 buffer->index = efx->next_buffer_table; 263 efx->next_buffer_table += buffer->entries; 264 265 netif_dbg(efx, probe, efx->net_dev, 266 "allocating special buffers %d-%d at %llx+%x " 267 "(virt %p phys %llx)\n", buffer->index, 268 buffer->index + buffer->entries - 1, 269 (u64)buffer->dma_addr, len, 270 buffer->addr, (u64)virt_to_phys(buffer->addr)); 271 272 return 0; 273 } 274 275 static void 276 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 277 { 278 if (!buffer->addr) 279 return; 280 281 netif_dbg(efx, hw, efx->net_dev, 282 "deallocating special buffers %d-%d at %llx+%x " 283 "(virt %p phys %llx)\n", buffer->index, 284 buffer->index + buffer->entries - 1, 285 (u64)buffer->dma_addr, buffer->len, 286 buffer->addr, (u64)virt_to_phys(buffer->addr)); 287 288 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, 289 buffer->dma_addr); 290 buffer->addr = NULL; 291 buffer->entries = 0; 292 } 293 294 /************************************************************************** 295 * 296 * Generic buffer handling 297 * These buffers are used for interrupt status and MAC stats 298 * 299 **************************************************************************/ 300 
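/* A minimal usage sketch (illustrative only): callers allocate one of
 * these coherent DMA buffers at probe time and release it on removal,
 * e.g. for the interrupt status word:
 *
 *	rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
 *	if (rc)
 *		return rc;
 *	...
 *	efx_nic_free_buffer(efx, &efx->irq_status);
 */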
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
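 *
 * If the queue was empty when descriptors were inserted
 * (efx_may_push_tx_desc()), the first new descriptor is written
 * directly into the TX_DESC_UPD register ("TX push") so the NIC need
 * not fetch it from host memory; otherwise only the write pointer is
 * updated via efx_notify_tx_desc().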
382 */ 383 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) 384 { 385 386 struct efx_tx_buffer *buffer; 387 efx_qword_t *txd; 388 unsigned write_ptr; 389 unsigned old_write_count = tx_queue->write_count; 390 391 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 392 393 do { 394 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 395 buffer = &tx_queue->buffer[write_ptr]; 396 txd = efx_tx_desc(tx_queue, write_ptr); 397 ++tx_queue->write_count; 398 399 /* Create TX descriptor ring entry */ 400 EFX_POPULATE_QWORD_4(*txd, 401 FSF_AZ_TX_KER_CONT, buffer->continuation, 402 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 403 FSF_AZ_TX_KER_BUF_REGION, 0, 404 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 405 } while (tx_queue->write_count != tx_queue->insert_count); 406 407 wmb(); /* Ensure descriptors are written before they are fetched */ 408 409 if (efx_may_push_tx_desc(tx_queue, old_write_count)) { 410 txd = efx_tx_desc(tx_queue, 411 old_write_count & tx_queue->ptr_mask); 412 efx_push_tx_desc(tx_queue, txd); 413 ++tx_queue->pushes; 414 } else { 415 efx_notify_tx_desc(tx_queue); 416 } 417 } 418 419 /* Allocate hardware resources for a TX queue */ 420 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 421 { 422 struct efx_nic *efx = tx_queue->efx; 423 unsigned entries; 424 425 entries = tx_queue->ptr_mask + 1; 426 return efx_alloc_special_buffer(efx, &tx_queue->txd, 427 entries * sizeof(efx_qword_t)); 428 } 429 430 void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 431 { 432 struct efx_nic *efx = tx_queue->efx; 433 efx_oword_t reg; 434 435 tx_queue->flushed = FLUSH_NONE; 436 437 /* Pin TX descriptor ring */ 438 efx_init_special_buffer(efx, &tx_queue->txd); 439 440 /* Push TX descriptor ring to card */ 441 EFX_POPULATE_OWORD_10(reg, 442 FRF_AZ_TX_DESCQ_EN, 1, 443 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 444 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 445 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 446 FRF_AZ_TX_DESCQ_EVQ_ID, 447 tx_queue->channel->channel, 448 FRF_AZ_TX_DESCQ_OWNER_ID, 0, 449 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, 450 FRF_AZ_TX_DESCQ_SIZE, 451 __ffs(tx_queue->txd.entries), 452 FRF_AZ_TX_DESCQ_TYPE, 0, 453 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 454 455 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 456 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 457 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 458 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, 459 !csum); 460 } 461 462 efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, 463 tx_queue->queue); 464 465 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 466 /* Only 128 bits in this register */ 467 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 468 469 efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); 470 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 471 clear_bit_le(tx_queue->queue, (void *)®); 472 else 473 set_bit_le(tx_queue->queue, (void *)®); 474 efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); 475 } 476 477 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 478 EFX_POPULATE_OWORD_1(reg, 479 FRF_BZ_TX_PACE, 480 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
481 FFE_BZ_TX_PACE_OFF : 482 FFE_BZ_TX_PACE_RESERVED); 483 efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, 484 tx_queue->queue); 485 } 486 } 487 488 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 489 { 490 struct efx_nic *efx = tx_queue->efx; 491 efx_oword_t tx_flush_descq; 492 493 tx_queue->flushed = FLUSH_PENDING; 494 495 /* Post a flush command */ 496 EFX_POPULATE_OWORD_2(tx_flush_descq, 497 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 498 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 499 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 500 } 501 502 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) 503 { 504 struct efx_nic *efx = tx_queue->efx; 505 efx_oword_t tx_desc_ptr; 506 507 /* The queue should have been flushed */ 508 WARN_ON(tx_queue->flushed != FLUSH_DONE); 509 510 /* Remove TX descriptor ring from card */ 511 EFX_ZERO_OWORD(tx_desc_ptr); 512 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 513 tx_queue->queue); 514 515 /* Unpin TX descriptor ring */ 516 efx_fini_special_buffer(efx, &tx_queue->txd); 517 } 518 519 /* Free buffers backing TX queue */ 520 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) 521 { 522 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 523 } 524 525 /************************************************************************** 526 * 527 * RX path 528 * 529 **************************************************************************/ 530 531 /* Returns a pointer to the specified descriptor in the RX descriptor queue */ 532 static inline efx_qword_t * 533 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 534 { 535 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; 536 } 537 538 /* This creates an entry in the RX descriptor queue */ 539 static inline void 540 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 541 { 542 struct efx_rx_buffer *rx_buf; 543 efx_qword_t *rxd; 544 545 rxd = efx_rx_desc(rx_queue, index); 546 rx_buf = efx_rx_buffer(rx_queue, index); 547 EFX_POPULATE_QWORD_3(*rxd, 548 FSF_AZ_RX_KER_BUF_SIZE, 549 rx_buf->len - 550 rx_queue->efx->type->rx_buffer_padding, 551 FSF_AZ_RX_KER_BUF_REGION, 0, 552 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 553 } 554 555 /* This writes to the RX_DESC_WPTR register for the specified receive 556 * descriptor ring. 
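 *
 * Hardware descriptors are built here, lazily, for every buffer added
 * since the last call, then the updated write pointer is published to
 * the NIC after a write memory barrier.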
557 */ 558 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 559 { 560 struct efx_nic *efx = rx_queue->efx; 561 efx_dword_t reg; 562 unsigned write_ptr; 563 564 while (rx_queue->notified_count != rx_queue->added_count) { 565 efx_build_rx_desc( 566 rx_queue, 567 rx_queue->notified_count & rx_queue->ptr_mask); 568 ++rx_queue->notified_count; 569 } 570 571 wmb(); 572 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 573 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 574 efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, 575 efx_rx_queue_index(rx_queue)); 576 } 577 578 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 579 { 580 struct efx_nic *efx = rx_queue->efx; 581 unsigned entries; 582 583 entries = rx_queue->ptr_mask + 1; 584 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 585 entries * sizeof(efx_qword_t)); 586 } 587 588 void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 589 { 590 efx_oword_t rx_desc_ptr; 591 struct efx_nic *efx = rx_queue->efx; 592 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 593 bool iscsi_digest_en = is_b0; 594 595 netif_dbg(efx, hw, efx->net_dev, 596 "RX queue %d ring in special buffers %d-%d\n", 597 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 598 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 599 600 rx_queue->flushed = FLUSH_NONE; 601 602 /* Pin RX descriptor ring */ 603 efx_init_special_buffer(efx, &rx_queue->rxd); 604 605 /* Push RX descriptor ring to card */ 606 EFX_POPULATE_OWORD_10(rx_desc_ptr, 607 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, 608 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 609 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 610 FRF_AZ_RX_DESCQ_EVQ_ID, 611 efx_rx_queue_channel(rx_queue)->channel, 612 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 613 FRF_AZ_RX_DESCQ_LABEL, 614 efx_rx_queue_index(rx_queue), 615 FRF_AZ_RX_DESCQ_SIZE, 616 __ffs(rx_queue->rxd.entries), 617 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 618 /* For >=B0 this is scatter so disable */ 619 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 620 FRF_AZ_RX_DESCQ_EN, 1); 621 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 622 efx_rx_queue_index(rx_queue)); 623 } 624 625 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 626 { 627 struct efx_nic *efx = rx_queue->efx; 628 efx_oword_t rx_flush_descq; 629 630 rx_queue->flushed = FLUSH_PENDING; 631 632 /* Post a flush command */ 633 EFX_POPULATE_OWORD_2(rx_flush_descq, 634 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 635 FRF_AZ_RX_FLUSH_DESCQ, 636 efx_rx_queue_index(rx_queue)); 637 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 638 } 639 640 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) 641 { 642 efx_oword_t rx_desc_ptr; 643 struct efx_nic *efx = rx_queue->efx; 644 645 /* The queue should already have been flushed */ 646 WARN_ON(rx_queue->flushed != FLUSH_DONE); 647 648 /* Remove RX descriptor ring from card */ 649 EFX_ZERO_OWORD(rx_desc_ptr); 650 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 651 efx_rx_queue_index(rx_queue)); 652 653 /* Unpin RX descriptor ring */ 654 efx_fini_special_buffer(efx, &rx_queue->rxd); 655 } 656 657 /* Free buffers backing RX queue */ 658 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) 659 { 660 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 661 } 662 663 /************************************************************************** 664 * 665 * Event queue processing 666 * Event queues are processed by per-channel tasklets. 
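 * Each event is an 8-byte (qword) entry in a special buffer.  Processed
 * entries are overwritten with all-ones and the read pointer is
 * acknowledged back to the NIC via efx_nic_eventq_read_ack().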
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit.
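 *
 * Returns EFX_RX_PKT_DISCARD if the frame must be dropped (CRC error,
 * truncation, dribble nibble, "to be discarded" or pause frame),
 * otherwise 0.  Per-channel error counters are updated as a side effect.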
*/ 758 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 759 const efx_qword_t *event) 760 { 761 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 762 struct efx_nic *efx = rx_queue->efx; 763 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 764 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 765 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 766 bool rx_ev_other_err, rx_ev_pause_frm; 767 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 768 unsigned rx_ev_pkt_type; 769 770 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 771 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 772 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 773 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 774 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 775 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 776 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 777 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 778 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 779 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 780 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 781 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 782 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 783 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 784 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 785 786 /* Every error apart from tobe_disc and pause_frm */ 787 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 788 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 789 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 790 791 /* Count errors that are not in MAC stats. Ignore expected 792 * checksum errors during self-test. */ 793 if (rx_ev_frm_trunc) 794 ++channel->n_rx_frm_trunc; 795 else if (rx_ev_tobe_disc) 796 ++channel->n_rx_tobe_disc; 797 else if (!efx->loopback_selftest) { 798 if (rx_ev_ip_hdr_chksum_err) 799 ++channel->n_rx_ip_hdr_chksum_err; 800 else if (rx_ev_tcp_udp_chksum_err) 801 ++channel->n_rx_tcp_udp_chksum_err; 802 } 803 804 /* TOBE_DISC is expected on unicast mismatches; don't print out an 805 * error message. FRM_TRUNC indicates RXDP dropped the packet due 806 * to a FIFO overflow. 807 */ 808 #ifdef DEBUG 809 if (rx_ev_other_err && net_ratelimit()) { 810 netif_dbg(efx, rx_err, efx->net_dev, 811 " RX queue %d unexpected RX event " 812 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 813 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 814 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 815 rx_ev_ip_hdr_chksum_err ? 816 " [IP_HDR_CHKSUM_ERR]" : "", 817 rx_ev_tcp_udp_chksum_err ? 818 " [TCP_UDP_CHKSUM_ERR]" : "", 819 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 820 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 821 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 822 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 823 rx_ev_pause_frm ? " [PAUSE]" : ""); 824 } 825 #endif 826 827 /* The frame must be discarded if any of these are true. */ 828 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 829 rx_ev_tobe_disc | rx_ev_pause_frm) ? 830 EFX_RX_PKT_DISCARD : 0; 831 } 832 833 /* Handle receive events that are not in-order. 
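 *
 * The descriptor index carried in the event is compared against the
 * index expected from removed_count; a mismatch means completion
 * events were lost, so the number of dropped events is logged and a
 * recovery reset (or, without workaround 5676, a disabling reset) is
 * scheduled.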
*/ 834 static void 835 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 836 { 837 struct efx_nic *efx = rx_queue->efx; 838 unsigned expected, dropped; 839 840 expected = rx_queue->removed_count & rx_queue->ptr_mask; 841 dropped = (index - expected) & rx_queue->ptr_mask; 842 netif_info(efx, rx_err, efx->net_dev, 843 "dropped %d events (index=%d expected=%d)\n", 844 dropped, index, expected); 845 846 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 847 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 848 } 849 850 /* Handle a packet received event 851 * 852 * The NIC gives a "discard" flag if it's a unicast packet with the 853 * wrong destination address 854 * Also "is multicast" and "matches multicast filter" flags can be used to 855 * discard non-matching multicast packets. 856 */ 857 static void 858 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 859 { 860 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 861 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 862 unsigned expected_ptr; 863 bool rx_ev_pkt_ok; 864 u16 flags; 865 struct efx_rx_queue *rx_queue; 866 867 /* Basic packet information */ 868 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 869 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 870 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 871 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); 872 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); 873 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 874 channel->channel); 875 876 rx_queue = efx_channel_get_rx_queue(channel); 877 878 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 879 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 880 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 881 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 882 883 if (likely(rx_ev_pkt_ok)) { 884 /* If packet is marked as OK and packet type is TCP/IP or 885 * UDP/IP, then we can rely on the hardware checksum. 886 */ 887 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 888 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? 889 EFX_RX_PKT_CSUMMED : 0; 890 } else { 891 flags = efx_handle_rx_not_ok(rx_queue, event); 892 } 893 894 /* Detect multicast packets that didn't match the filter */ 895 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 896 if (rx_ev_mcast_pkt) { 897 unsigned int rx_ev_mcast_hash_match = 898 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 899 900 if (unlikely(!rx_ev_mcast_hash_match)) { 901 ++channel->n_rx_mcast_mismatch; 902 flags |= EFX_RX_PKT_DISCARD; 903 } 904 } 905 906 channel->irq_mod_score += 2; 907 908 /* Handle received packet */ 909 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); 910 } 911 912 static void 913 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 914 { 915 struct efx_nic *efx = channel->efx; 916 struct efx_rx_queue *rx_queue = 917 efx_channel_has_rx_queue(channel) ? 918 efx_channel_get_rx_queue(channel) : NULL; 919 unsigned magic; 920 921 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 922 923 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) 924 ; /* ignore */ 925 else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) 926 /* The queue must be empty, so we won't receive any rx 927 * events, so efx_process_channel() won't refill the 928 * queue. 
Refill it here */ 929 efx_fast_push_rx_descriptors(rx_queue); 930 else 931 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 932 "generated event "EFX_QWORD_FMT"\n", 933 channel->channel, EFX_QWORD_VAL(*event)); 934 } 935 936 static void 937 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 938 { 939 struct efx_nic *efx = channel->efx; 940 unsigned int ev_sub_code; 941 unsigned int ev_sub_data; 942 943 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 944 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 945 946 switch (ev_sub_code) { 947 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 948 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 949 channel->channel, ev_sub_data); 950 break; 951 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 952 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 953 channel->channel, ev_sub_data); 954 break; 955 case FSE_AZ_EVQ_INIT_DONE_EV: 956 netif_dbg(efx, hw, efx->net_dev, 957 "channel %d EVQ %d initialised\n", 958 channel->channel, ev_sub_data); 959 break; 960 case FSE_AZ_SRM_UPD_DONE_EV: 961 netif_vdbg(efx, hw, efx->net_dev, 962 "channel %d SRAM update done\n", channel->channel); 963 break; 964 case FSE_AZ_WAKE_UP_EV: 965 netif_vdbg(efx, hw, efx->net_dev, 966 "channel %d RXQ %d wakeup event\n", 967 channel->channel, ev_sub_data); 968 break; 969 case FSE_AZ_TIMER_EV: 970 netif_vdbg(efx, hw, efx->net_dev, 971 "channel %d RX queue %d timer expired\n", 972 channel->channel, ev_sub_data); 973 break; 974 case FSE_AA_RX_RECOVER_EV: 975 netif_err(efx, rx_err, efx->net_dev, 976 "channel %d seen DRIVER RX_RESET event. " 977 "Resetting.\n", channel->channel); 978 atomic_inc(&efx->rx_reset); 979 efx_schedule_reset(efx, 980 EFX_WORKAROUND_6555(efx) ? 981 RESET_TYPE_RX_RECOVERY : 982 RESET_TYPE_DISABLE); 983 break; 984 case FSE_BZ_RX_DSC_ERROR_EV: 985 netif_err(efx, rx_err, efx->net_dev, 986 "RX DMA Q %d reports descriptor fetch error." 987 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 988 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 989 break; 990 case FSE_BZ_TX_DSC_ERROR_EV: 991 netif_err(efx, tx_err, efx->net_dev, 992 "TX DMA Q %d reports descriptor fetch error." 
993 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 994 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 995 break; 996 default: 997 netif_vdbg(efx, hw, efx->net_dev, 998 "channel %d unknown driver event code %d " 999 "data %04x\n", channel->channel, ev_sub_code, 1000 ev_sub_data); 1001 break; 1002 } 1003 } 1004 1005 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1006 { 1007 struct efx_nic *efx = channel->efx; 1008 unsigned int read_ptr; 1009 efx_qword_t event, *p_event; 1010 int ev_code; 1011 int tx_packets = 0; 1012 int spent = 0; 1013 1014 read_ptr = channel->eventq_read_ptr; 1015 1016 for (;;) { 1017 p_event = efx_event(channel, read_ptr); 1018 event = *p_event; 1019 1020 if (!efx_event_present(&event)) 1021 /* End of events */ 1022 break; 1023 1024 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1025 "channel %d event is "EFX_QWORD_FMT"\n", 1026 channel->channel, EFX_QWORD_VAL(event)); 1027 1028 /* Clear this event by marking it all ones */ 1029 EFX_SET_QWORD(*p_event); 1030 1031 ++read_ptr; 1032 1033 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1034 1035 switch (ev_code) { 1036 case FSE_AZ_EV_CODE_RX_EV: 1037 efx_handle_rx_event(channel, &event); 1038 if (++spent == budget) 1039 goto out; 1040 break; 1041 case FSE_AZ_EV_CODE_TX_EV: 1042 tx_packets += efx_handle_tx_event(channel, &event); 1043 if (tx_packets > efx->txq_entries) { 1044 spent = budget; 1045 goto out; 1046 } 1047 break; 1048 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1049 efx_handle_generated_event(channel, &event); 1050 break; 1051 case FSE_AZ_EV_CODE_DRIVER_EV: 1052 efx_handle_driver_event(channel, &event); 1053 break; 1054 case FSE_CZ_EV_CODE_MCDI_EV: 1055 efx_mcdi_process_event(channel, &event); 1056 break; 1057 case FSE_AZ_EV_CODE_GLOBAL_EV: 1058 if (efx->type->handle_global_event && 1059 efx->type->handle_global_event(channel, &event)) 1060 break; 1061 /* else fall through */ 1062 default: 1063 netif_err(channel->efx, hw, channel->efx->net_dev, 1064 "channel %d unknown event type %d (data " 1065 EFX_QWORD_FMT ")\n", channel->channel, 1066 ev_code, EFX_QWORD_VAL(event)); 1067 } 1068 } 1069 1070 out: 1071 channel->eventq_read_ptr = read_ptr; 1072 return spent; 1073 } 1074 1075 /* Check whether an event is present in the eventq at the current 1076 * read pointer. Only useful for self-test. 
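 * Unlike efx_nic_process_eventq(), this only peeks at the entry; it
 * does not clear it or advance the read pointer.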
1077 */ 1078 bool efx_nic_event_present(struct efx_channel *channel) 1079 { 1080 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1081 } 1082 1083 /* Allocate buffer table entries for event queue */ 1084 int efx_nic_probe_eventq(struct efx_channel *channel) 1085 { 1086 struct efx_nic *efx = channel->efx; 1087 unsigned entries; 1088 1089 entries = channel->eventq_mask + 1; 1090 return efx_alloc_special_buffer(efx, &channel->eventq, 1091 entries * sizeof(efx_qword_t)); 1092 } 1093 1094 void efx_nic_init_eventq(struct efx_channel *channel) 1095 { 1096 efx_oword_t reg; 1097 struct efx_nic *efx = channel->efx; 1098 1099 netif_dbg(efx, hw, efx->net_dev, 1100 "channel %d event queue in special buffers %d-%d\n", 1101 channel->channel, channel->eventq.index, 1102 channel->eventq.index + channel->eventq.entries - 1); 1103 1104 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1105 EFX_POPULATE_OWORD_3(reg, 1106 FRF_CZ_TIMER_Q_EN, 1, 1107 FRF_CZ_HOST_NOTIFY_MODE, 0, 1108 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1109 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1110 } 1111 1112 /* Pin event queue buffer */ 1113 efx_init_special_buffer(efx, &channel->eventq); 1114 1115 /* Fill event queue with all ones (i.e. empty events) */ 1116 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1117 1118 /* Push event queue to card */ 1119 EFX_POPULATE_OWORD_3(reg, 1120 FRF_AZ_EVQ_EN, 1, 1121 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1122 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1123 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1124 channel->channel); 1125 1126 efx->type->push_irq_moderation(channel); 1127 } 1128 1129 void efx_nic_fini_eventq(struct efx_channel *channel) 1130 { 1131 efx_oword_t reg; 1132 struct efx_nic *efx = channel->efx; 1133 1134 /* Remove event queue from card */ 1135 EFX_ZERO_OWORD(reg); 1136 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1137 channel->channel); 1138 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1139 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1140 1141 /* Unpin event queue */ 1142 efx_fini_special_buffer(efx, &channel->eventq); 1143 } 1144 1145 /* Free buffers backing event queue */ 1146 void efx_nic_remove_eventq(struct efx_channel *channel) 1147 { 1148 efx_free_special_buffer(channel->efx, &channel->eventq); 1149 } 1150 1151 1152 void efx_nic_generate_test_event(struct efx_channel *channel) 1153 { 1154 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 1155 } 1156 1157 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) 1158 { 1159 efx_magic_event(efx_rx_queue_channel(rx_queue), 1160 EFX_CHANNEL_MAGIC_FILL(rx_queue)); 1161 } 1162 1163 /************************************************************************** 1164 * 1165 * Flush handling 1166 * 1167 **************************************************************************/ 1168 1169 1170 static void efx_poll_flush_events(struct efx_nic *efx) 1171 { 1172 struct efx_channel *channel = efx_get_channel(efx, 0); 1173 struct efx_tx_queue *tx_queue; 1174 struct efx_rx_queue *rx_queue; 1175 unsigned int read_ptr = channel->eventq_read_ptr; 1176 unsigned int end_ptr = read_ptr + channel->eventq_mask - 1; 1177 1178 do { 1179 efx_qword_t *event = efx_event(channel, read_ptr); 1180 int ev_code, ev_sub_code, ev_queue; 1181 bool ev_failed; 1182 1183 if (!efx_event_present(event)) 1184 break; 1185 1186 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); 1187 ev_sub_code = EFX_QWORD_FIELD(*event, 1188 FSF_AZ_DRIVER_EV_SUBCODE); 1189 if 
(ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1190 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { 1191 ev_queue = EFX_QWORD_FIELD(*event, 1192 FSF_AZ_DRIVER_EV_SUBDATA); 1193 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { 1194 tx_queue = efx_get_tx_queue( 1195 efx, ev_queue / EFX_TXQ_TYPES, 1196 ev_queue % EFX_TXQ_TYPES); 1197 tx_queue->flushed = FLUSH_DONE; 1198 } 1199 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1200 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { 1201 ev_queue = EFX_QWORD_FIELD( 1202 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1203 ev_failed = EFX_QWORD_FIELD( 1204 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1205 if (ev_queue < efx->n_rx_channels) { 1206 rx_queue = efx_get_rx_queue(efx, ev_queue); 1207 rx_queue->flushed = 1208 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1209 } 1210 } 1211 1212 /* We're about to destroy the queue anyway, so 1213 * it's ok to throw away every non-flush event */ 1214 EFX_SET_QWORD(*event); 1215 1216 ++read_ptr; 1217 } while (read_ptr != end_ptr); 1218 1219 channel->eventq_read_ptr = read_ptr; 1220 } 1221 1222 /* Handle tx and rx flushes at the same time, since they run in 1223 * parallel in the hardware and there's no reason for us to 1224 * serialise them */ 1225 int efx_nic_flush_queues(struct efx_nic *efx) 1226 { 1227 struct efx_channel *channel; 1228 struct efx_rx_queue *rx_queue; 1229 struct efx_tx_queue *tx_queue; 1230 int i, tx_pending, rx_pending; 1231 1232 /* If necessary prepare the hardware for flushing */ 1233 efx->type->prepare_flush(efx); 1234 1235 /* Flush all tx queues in parallel */ 1236 efx_for_each_channel(channel, efx) { 1237 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1238 if (tx_queue->initialised) 1239 efx_flush_tx_queue(tx_queue); 1240 } 1241 } 1242 1243 /* The hardware supports four concurrent rx flushes, each of which may 1244 * need to be retried if there is an outstanding descriptor fetch */ 1245 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { 1246 rx_pending = tx_pending = 0; 1247 efx_for_each_channel(channel, efx) { 1248 efx_for_each_channel_rx_queue(rx_queue, channel) { 1249 if (rx_queue->flushed == FLUSH_PENDING) 1250 ++rx_pending; 1251 } 1252 } 1253 efx_for_each_channel(channel, efx) { 1254 efx_for_each_channel_rx_queue(rx_queue, channel) { 1255 if (rx_pending == EFX_RX_FLUSH_COUNT) 1256 break; 1257 if (rx_queue->flushed == FLUSH_FAILED || 1258 rx_queue->flushed == FLUSH_NONE) { 1259 efx_flush_rx_queue(rx_queue); 1260 ++rx_pending; 1261 } 1262 } 1263 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1264 if (tx_queue->initialised && 1265 tx_queue->flushed != FLUSH_DONE) 1266 ++tx_pending; 1267 } 1268 } 1269 1270 if (rx_pending == 0 && tx_pending == 0) 1271 return 0; 1272 1273 msleep(EFX_FLUSH_INTERVAL); 1274 efx_poll_flush_events(efx); 1275 } 1276 1277 /* Mark the queues as all flushed. 
We're going to return failure 1278 * leading to a reset, or fake up success anyway */ 1279 efx_for_each_channel(channel, efx) { 1280 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1281 if (tx_queue->initialised && 1282 tx_queue->flushed != FLUSH_DONE) 1283 netif_err(efx, hw, efx->net_dev, 1284 "tx queue %d flush command timed out\n", 1285 tx_queue->queue); 1286 tx_queue->flushed = FLUSH_DONE; 1287 } 1288 efx_for_each_channel_rx_queue(rx_queue, channel) { 1289 if (rx_queue->flushed != FLUSH_DONE) 1290 netif_err(efx, hw, efx->net_dev, 1291 "rx queue %d flush command timed out\n", 1292 efx_rx_queue_index(rx_queue)); 1293 rx_queue->flushed = FLUSH_DONE; 1294 } 1295 } 1296 1297 return -ETIMEDOUT; 1298 } 1299 1300 /************************************************************************** 1301 * 1302 * Hardware interrupts 1303 * The hardware interrupt handler does very little work; all the event 1304 * queue processing is carried out by per-channel tasklets. 1305 * 1306 **************************************************************************/ 1307 1308 /* Enable/disable/generate interrupts */ 1309 static inline void efx_nic_interrupts(struct efx_nic *efx, 1310 bool enabled, bool force) 1311 { 1312 efx_oword_t int_en_reg_ker; 1313 1314 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1315 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1316 FRF_AZ_KER_INT_KER, force, 1317 FRF_AZ_DRV_INT_EN_KER, enabled); 1318 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1319 } 1320 1321 void efx_nic_enable_interrupts(struct efx_nic *efx) 1322 { 1323 struct efx_channel *channel; 1324 1325 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1326 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1327 1328 /* Enable interrupts */ 1329 efx_nic_interrupts(efx, true, false); 1330 1331 /* Force processing of all the channels to get the EVQ RPTRs up to 1332 date */ 1333 efx_for_each_channel(channel, efx) 1334 efx_schedule_channel(channel); 1335 } 1336 1337 void efx_nic_disable_interrupts(struct efx_nic *efx) 1338 { 1339 /* Disable interrupts */ 1340 efx_nic_interrupts(efx, false, false); 1341 } 1342 1343 /* Generate a test interrupt 1344 * Interrupt must already have been enabled, otherwise nasty things 1345 * may happen. 1346 */ 1347 void efx_nic_generate_interrupt(struct efx_nic *efx) 1348 { 1349 efx_nic_interrupts(efx, true, true); 1350 } 1351 1352 /* Process a fatal interrupt 1353 * Disable bus mastering ASAP and schedule a reset 1354 */ 1355 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1356 { 1357 struct falcon_nic_data *nic_data = efx->nic_data; 1358 efx_oword_t *int_ker = efx->irq_status.addr; 1359 efx_oword_t fatal_intr; 1360 int error, mem_perr; 1361 1362 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1363 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1364 1365 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1366 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1367 EFX_OWORD_VAL(fatal_intr), 1368 error ? 
"disabling bus mastering" : "no recognised error"); 1369 1370 /* If this is a memory parity error dump which blocks are offending */ 1371 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1372 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1373 if (mem_perr) { 1374 efx_oword_t reg; 1375 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1376 netif_err(efx, hw, efx->net_dev, 1377 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1378 EFX_OWORD_VAL(reg)); 1379 } 1380 1381 /* Disable both devices */ 1382 pci_clear_master(efx->pci_dev); 1383 if (efx_nic_is_dual_func(efx)) 1384 pci_clear_master(nic_data->pci_dev2); 1385 efx_nic_disable_interrupts(efx); 1386 1387 /* Count errors and reset or disable the NIC accordingly */ 1388 if (efx->int_error_count == 0 || 1389 time_after(jiffies, efx->int_error_expire)) { 1390 efx->int_error_count = 0; 1391 efx->int_error_expire = 1392 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1393 } 1394 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1395 netif_err(efx, hw, efx->net_dev, 1396 "SYSTEM ERROR - reset scheduled\n"); 1397 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1398 } else { 1399 netif_err(efx, hw, efx->net_dev, 1400 "SYSTEM ERROR - max number of errors seen." 1401 "NIC will be disabled\n"); 1402 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1403 } 1404 1405 return IRQ_HANDLED; 1406 } 1407 1408 /* Handle a legacy interrupt 1409 * Acknowledges the interrupt and schedule event queue processing. 1410 */ 1411 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1412 { 1413 struct efx_nic *efx = dev_id; 1414 efx_oword_t *int_ker = efx->irq_status.addr; 1415 irqreturn_t result = IRQ_NONE; 1416 struct efx_channel *channel; 1417 efx_dword_t reg; 1418 u32 queues; 1419 int syserr; 1420 1421 /* Could this be ours? If interrupts are disabled then the 1422 * channel state may not be valid. 1423 */ 1424 if (!efx->legacy_irq_enabled) 1425 return result; 1426 1427 /* Read the ISR which also ACKs the interrupts */ 1428 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1429 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1430 1431 /* Handle non-event-queue sources */ 1432 if (queues & (1U << efx->irq_level)) { 1433 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1434 if (unlikely(syserr)) 1435 return efx_nic_fatal_interrupt(efx); 1436 efx->last_irq_cpu = raw_smp_processor_id(); 1437 } 1438 1439 if (queues != 0) { 1440 if (EFX_WORKAROUND_15783(efx)) 1441 efx->irq_zero_count = 0; 1442 1443 /* Schedule processing of any interrupting queues */ 1444 efx_for_each_channel(channel, efx) { 1445 if (queues & 1) 1446 efx_schedule_channel_irq(channel); 1447 queues >>= 1; 1448 } 1449 result = IRQ_HANDLED; 1450 1451 } else if (EFX_WORKAROUND_15783(efx)) { 1452 efx_qword_t *event; 1453 1454 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1455 * because this might be a shared interrupt. */ 1456 if (efx->irq_zero_count++ == 0) 1457 result = IRQ_HANDLED; 1458 1459 /* Ensure we schedule or rearm all event queues */ 1460 efx_for_each_channel(channel, efx) { 1461 event = efx_event(channel, channel->eventq_read_ptr); 1462 if (efx_event_present(event)) 1463 efx_schedule_channel_irq(channel); 1464 else 1465 efx_nic_eventq_read_ack(channel); 1466 } 1467 } 1468 1469 if (result == IRQ_HANDLED) 1470 netif_vdbg(efx, intr, efx->net_dev, 1471 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1472 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1473 1474 return result; 1475 } 1476 1477 /* Handle an MSI interrupt 1478 * 1479 * Handle an MSI hardware interrupt. 
This routine schedules event 1480 * queue processing. No interrupt acknowledgement cycle is necessary. 1481 * Also, we never need to check that the interrupt is for us, since 1482 * MSI interrupts cannot be shared. 1483 */ 1484 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1485 { 1486 struct efx_channel *channel = *(struct efx_channel **)dev_id; 1487 struct efx_nic *efx = channel->efx; 1488 efx_oword_t *int_ker = efx->irq_status.addr; 1489 int syserr; 1490 1491 netif_vdbg(efx, intr, efx->net_dev, 1492 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1493 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1494 1495 /* Handle non-event-queue sources */ 1496 if (channel->channel == efx->irq_level) { 1497 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1498 if (unlikely(syserr)) 1499 return efx_nic_fatal_interrupt(efx); 1500 efx->last_irq_cpu = raw_smp_processor_id(); 1501 } 1502 1503 /* Schedule processing of the channel */ 1504 efx_schedule_channel_irq(channel); 1505 1506 return IRQ_HANDLED; 1507 } 1508 1509 1510 /* Setup RSS indirection table. 1511 * This maps from the hash value of the packet to RXQ 1512 */ 1513 void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1514 { 1515 size_t i = 0; 1516 efx_dword_t dword; 1517 1518 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1519 return; 1520 1521 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1522 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1523 1524 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1525 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1526 efx->rx_indir_table[i]); 1527 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); 1528 } 1529 } 1530 1531 /* Hook interrupt handler(s) 1532 * Try MSI and then legacy interrupts. 1533 */ 1534 int efx_nic_init_interrupt(struct efx_nic *efx) 1535 { 1536 struct efx_channel *channel; 1537 int rc; 1538 1539 if (!EFX_INT_MODE_USE_MSI(efx)) { 1540 irq_handler_t handler; 1541 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1542 handler = efx_legacy_interrupt; 1543 else 1544 handler = falcon_legacy_interrupt_a1; 1545 1546 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1547 efx->name, efx); 1548 if (rc) { 1549 netif_err(efx, drv, efx->net_dev, 1550 "failed to hook legacy IRQ %d\n", 1551 efx->pci_dev->irq); 1552 goto fail1; 1553 } 1554 return 0; 1555 } 1556 1557 /* Hook MSI or MSI-X interrupt */ 1558 efx_for_each_channel(channel, efx) { 1559 rc = request_irq(channel->irq, efx_msi_interrupt, 1560 IRQF_PROBE_SHARED, /* Not shared */ 1561 efx->channel_name[channel->channel], 1562 &efx->channel[channel->channel]); 1563 if (rc) { 1564 netif_err(efx, drv, efx->net_dev, 1565 "failed to hook IRQ %d\n", channel->irq); 1566 goto fail2; 1567 } 1568 } 1569 1570 return 0; 1571 1572 fail2: 1573 efx_for_each_channel(channel, efx) 1574 free_irq(channel->irq, &efx->channel[channel->channel]); 1575 fail1: 1576 return rc; 1577 } 1578 1579 void efx_nic_fini_interrupt(struct efx_nic *efx) 1580 { 1581 struct efx_channel *channel; 1582 efx_oword_t reg; 1583 1584 /* Disable MSI/MSI-X interrupts */ 1585 efx_for_each_channel(channel, efx) { 1586 if (channel->irq) 1587 free_irq(channel->irq, &efx->channel[channel->channel]); 1588 } 1589 1590 /* ACK legacy interrupt */ 1591 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1592 efx_reado(efx, ®, FR_BZ_INT_ISR0); 1593 else 1594 falcon_irq_ack_a1(efx); 1595 1596 /* Disable legacy interrupt */ 1597 if (efx->legacy_irq) 1598 free_irq(efx->legacy_irq, efx); 1599 } 1600 1601 u32 efx_nic_fpga_ver(struct efx_nic *efx) 1602 { 1603 efx_oword_t altera_build; 1604 
efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1605 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1606 } 1607 1608 void efx_nic_init_common(struct efx_nic *efx) 1609 { 1610 efx_oword_t temp; 1611 1612 /* Set positions of descriptor caches in SRAM. */ 1613 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, 1614 efx->type->tx_dc_base / 8); 1615 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1616 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, 1617 efx->type->rx_dc_base / 8); 1618 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1619 1620 /* Set TX descriptor cache size. */ 1621 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1622 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1623 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1624 1625 /* Set RX descriptor cache size. Set low watermark to size-8, as 1626 * this allows most efficient prefetching. 1627 */ 1628 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1629 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1630 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1631 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1632 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1633 1634 /* Program INT_KER address */ 1635 EFX_POPULATE_OWORD_2(temp, 1636 FRF_AZ_NORM_INT_VEC_DIS_KER, 1637 EFX_INT_MODE_USE_MSI(efx), 1638 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1639 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1640 1641 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1642 /* Use an interrupt level unused by event queues */ 1643 efx->irq_level = 0x1f; 1644 else 1645 /* Use a valid MSI-X vector */ 1646 efx->irq_level = 0; 1647 1648 /* Enable all the genuinely fatal interrupts. (They are still 1649 * masked by the overall interrupt mask, controlled by 1650 * falcon_interrupts()). 1651 * 1652 * Note: All other fatal interrupts are enabled 1653 */ 1654 EFX_POPULATE_OWORD_3(temp, 1655 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1656 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1657 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1658 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1659 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1660 EFX_INVERT_OWORD(temp); 1661 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1662 1663 efx_nic_push_rx_indir_table(efx); 1664 1665 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1666 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
1667 */ 1668 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1669 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1673 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1674 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1675 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1676 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1677 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1678 /* Disable hardware watchdog which can misfire */ 1679 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1680 /* Squash TX of packets of 16 bytes or less */ 1681 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1682 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1683 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1684 1685 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1686 EFX_POPULATE_OWORD_4(temp, 1687 /* Default values */ 1688 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1689 FRF_BZ_TX_PACE_SB_AF, 0xb, 1690 FRF_BZ_TX_PACE_FB_BASE, 0, 1691 /* Allow large pace values in the 1692 * fast bin. */ 1693 FRF_BZ_TX_PACE_BIN_TH, 1694 FFE_BZ_TX_PACE_RESERVED); 1695 efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1696 } 1697 } 1698 1699 /* Register dump */ 1700 1701 #define REGISTER_REVISION_A 1 1702 #define REGISTER_REVISION_B 2 1703 #define REGISTER_REVISION_C 3 1704 #define REGISTER_REVISION_Z 3 /* latest revision */ 1705 1706 struct efx_nic_reg { 1707 u32 offset:24; 1708 u32 min_revision:2, max_revision:2; 1709 }; 1710 1711 #define REGISTER(name, min_rev, max_rev) { \ 1712 FR_ ## min_rev ## max_rev ## _ ## name, \ 1713 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 1714 } 1715 #define REGISTER_AA(name) REGISTER(name, A, A) 1716 #define REGISTER_AB(name) REGISTER(name, A, B) 1717 #define REGISTER_AZ(name) REGISTER(name, A, Z) 1718 #define REGISTER_BB(name) REGISTER(name, B, B) 1719 #define REGISTER_BZ(name) REGISTER(name, B, Z) 1720 #define REGISTER_CZ(name) REGISTER(name, C, Z) 1721 1722 static const struct efx_nic_reg efx_nic_regs[] = { 1723 REGISTER_AZ(ADR_REGION), 1724 REGISTER_AZ(INT_EN_KER), 1725 REGISTER_BZ(INT_EN_CHAR), 1726 REGISTER_AZ(INT_ADR_KER), 1727 REGISTER_BZ(INT_ADR_CHAR), 1728 /* INT_ACK_KER is WO */ 1729 /* INT_ISR0 is RC */ 1730 REGISTER_AZ(HW_INIT), 1731 REGISTER_CZ(USR_EV_CFG), 1732 REGISTER_AB(EE_SPI_HCMD), 1733 REGISTER_AB(EE_SPI_HADR), 1734 REGISTER_AB(EE_SPI_HDATA), 1735 REGISTER_AB(EE_BASE_PAGE), 1736 REGISTER_AB(EE_VPD_CFG0), 1737 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ 1738 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ 1739 /* PCIE_CORE_INDIRECT is indirect */ 1740 REGISTER_AB(NIC_STAT), 1741 REGISTER_AB(GPIO_CTL), 1742 REGISTER_AB(GLB_CTL), 1743 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ 1744 REGISTER_BZ(DP_CTRL), 1745 REGISTER_AZ(MEM_STAT), 1746 REGISTER_AZ(CS_DEBUG), 1747 REGISTER_AZ(ALTERA_BUILD), 1748 REGISTER_AZ(CSR_SPARE), 1749 REGISTER_AB(PCIE_SD_CTL0123), 1750 REGISTER_AB(PCIE_SD_CTL45), 1751 REGISTER_AB(PCIE_PCS_CTL_STAT), 1752 /* DEBUG_DATA_OUT is not used */ 1753 /* DRV_EV is WO */ 1754 REGISTER_AZ(EVQ_CTL), 1755 REGISTER_AZ(EVQ_CNT1), 1756 REGISTER_AZ(EVQ_CNT2), 1757 REGISTER_AZ(BUF_TBL_CFG), 1758 REGISTER_AZ(SRM_RX_DC_CFG), 1759 REGISTER_AZ(SRM_TX_DC_CFG), 1760 REGISTER_AZ(SRM_CFG), 1761 /* BUF_TBL_UPD is WO */ 1762 REGISTER_AZ(SRM_UPD_EVQ), 1763 REGISTER_AZ(SRAM_PARITY), 1764 REGISTER_AZ(RX_CFG), 1765 
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
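/* Worked example (illustrative comment only): tables declared with
 * REGISTER_TABLE_BB_CZ keep the same offset and step from revision B
 * onwards but change their row count, so REGISTER_TABLE_BB_CZ(TIMER_TBL)
 * emits two entries:
 *
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 *	  FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 *	  FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
 *
 * and the revision checks in the dump code below select whichever
 * entry applies to the NIC being probed.
 */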
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
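/* Usage sketch (comment only; the ethtool glue is expected to live
 * outside this file): efx_nic_get_regs_len() and efx_nic_get_regs()
 * are designed to back an ethtool register dump, where the ethtool
 * core first asks the driver for the dump length and then for the
 * dump itself.  A caller would therefore pair them roughly like this
 * (the function name below is hypothetical):
 *
 *	static void example_get_regs(struct net_device *net_dev,
 *				     struct ethtool_regs *regs, void *buf)
 *	{
 *		struct efx_nic *efx = netdev_priv(net_dev);
 *
 *		regs->version = efx->type->revision;
 *		efx_nic_get_regs(efx, buf);
 *	}
 *
 * with the matching get_regs_len() method simply returning
 * efx_nic_get_regs_len(efx) so the core can size the buffer it passes
 * in as 'buf'.
 */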