1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include "net_driver.h" 18 #include "bitfield.h" 19 #include "efx.h" 20 #include "nic.h" 21 #include "regs.h" 22 #include "io.h" 23 #include "workarounds.h" 24 25 /************************************************************************** 26 * 27 * Configurable values 28 * 29 ************************************************************************** 30 */ 31 32 /* This is set to 16 for a good reason. In summary, if larger than 33 * 16, the descriptor cache holds more than a default socket 34 * buffer's worth of packets (for UDP we can only have at most one 35 * socket buffer's worth outstanding). This combined with the fact 36 * that we only get 1 TX event per descriptor cache means the NIC 37 * goes idle. 38 */ 39 #define TX_DC_ENTRIES 16 40 #define TX_DC_ENTRIES_ORDER 1 41 42 #define RX_DC_ENTRIES 64 43 #define RX_DC_ENTRIES_ORDER 3 44 45 /* If EFX_MAX_INT_ERRORS internal errors occur within 46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 47 * disable it. 48 */ 49 #define EFX_INT_ERROR_EXPIRE 3600 50 #define EFX_MAX_INT_ERRORS 5 51 52 /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times 53 */ 54 #define EFX_FLUSH_INTERVAL 10 55 #define EFX_FLUSH_POLL_COUNT 100 56 57 /* Size and alignment of special buffers (4KB) */ 58 #define EFX_BUF_SIZE 4096 59 60 /* Depth of RX flush request fifo */ 61 #define EFX_RX_FLUSH_COUNT 4 62 63 /* Generated event code for efx_generate_test_event() */ 64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 65 (0x00010100 + (_channel)->channel) 66 67 /* Generated event code for efx_generate_fill_event() */ 68 #define EFX_CHANNEL_MAGIC_FILL(_channel) \ 69 (0x00010200 + (_channel)->channel) 70 71 /************************************************************************** 72 * 73 * Solarstorm hardware access 74 * 75 **************************************************************************/ 76 77 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 78 unsigned int index) 79 { 80 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 81 value, index); 82 } 83 84 /* Read the current event from the event queue */ 85 static inline efx_qword_t *efx_event(struct efx_channel *channel, 86 unsigned int index) 87 { 88 return ((efx_qword_t *) (channel->eventq.addr)) + 89 (index & channel->eventq_mask); 90 } 91 92 /* See if an event is present 93 * 94 * We check both the high and low dword of the event for all ones. We 95 * wrote all ones when we cleared the event, and no valid event can 96 * have all ones in either its high or low dwords. This approach is 97 * robust against reordering. 98 * 99 * Note that using a single 64-bit comparison is incorrect; even 100 * though the CPU read will be atomic, the DMA write may not be. 
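 *
 * For example, if the NIC has so far DMA'd only the low half of a new
 * event, the low dword is no longer all-ones but the high dword still is.
 * Checking the dwords separately, roughly
 *
 *	present = !all_ones(event->dword[0]) && !all_ones(event->dword[1]);
 *
 * (all_ones() standing in here for EFX_DWORD_IS_ALL_ONES), still reports
 * "not present" until both halves have landed, whereas a single 64-bit
 * compare against ~0ULL would treat the half-written event as complete.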
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
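 *
 * Each buffer table entry describes one EFX_BUF_SIZE (4KB) page: entry i
 * holds (buffer->dma_addr + i * 4096) >> 12 plus the owner ID.  For
 * example, a 32KB event queue allocated by efx_alloc_special_buffer()
 * occupies eight consecutive entries, buffer->index .. buffer->index + 7,
 * and the queue registers below then refer to it by its base buffer ID
 * (FRF_AZ_*_BUF_BASE_ID) rather than by DMA address.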
185 */ 186 static void 187 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 188 { 189 efx_qword_t buf_desc; 190 int index; 191 dma_addr_t dma_addr; 192 int i; 193 194 EFX_BUG_ON_PARANOID(!buffer->addr); 195 196 /* Write buffer descriptors to NIC */ 197 for (i = 0; i < buffer->entries; i++) { 198 index = buffer->index + i; 199 dma_addr = buffer->dma_addr + (i * 4096); 200 netif_dbg(efx, probe, efx->net_dev, 201 "mapping special buffer %d at %llx\n", 202 index, (unsigned long long)dma_addr); 203 EFX_POPULATE_QWORD_3(buf_desc, 204 FRF_AZ_BUF_ADR_REGION, 0, 205 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 206 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 207 efx_write_buf_tbl(efx, &buf_desc, index); 208 } 209 } 210 211 /* Unmaps a buffer and clears the buffer table entries */ 212 static void 213 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 214 { 215 efx_oword_t buf_tbl_upd; 216 unsigned int start = buffer->index; 217 unsigned int end = (buffer->index + buffer->entries - 1); 218 219 if (!buffer->entries) 220 return; 221 222 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 223 buffer->index, buffer->index + buffer->entries - 1); 224 225 EFX_POPULATE_OWORD_4(buf_tbl_upd, 226 FRF_AZ_BUF_UPD_CMD, 0, 227 FRF_AZ_BUF_CLR_CMD, 1, 228 FRF_AZ_BUF_CLR_END_ID, end, 229 FRF_AZ_BUF_CLR_START_ID, start); 230 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 231 } 232 233 /* 234 * Allocate a new special buffer 235 * 236 * This allocates memory for a new buffer, clears it and allocates a 237 * new buffer ID range. It does not write into the buffer table. 238 * 239 * This call will allocate 4KB buffers, since 8KB buffers can't be 240 * used for event queues and descriptor rings. 241 */ 242 static int efx_alloc_special_buffer(struct efx_nic *efx, 243 struct efx_special_buffer *buffer, 244 unsigned int len) 245 { 246 len = ALIGN(len, EFX_BUF_SIZE); 247 248 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 249 &buffer->dma_addr, GFP_KERNEL); 250 if (!buffer->addr) 251 return -ENOMEM; 252 buffer->len = len; 253 buffer->entries = len / EFX_BUF_SIZE; 254 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 255 256 /* All zeros is a potentially valid event so memset to 0xff */ 257 memset(buffer->addr, 0xff, len); 258 259 /* Select new buffer ID */ 260 buffer->index = efx->next_buffer_table; 261 efx->next_buffer_table += buffer->entries; 262 263 netif_dbg(efx, probe, efx->net_dev, 264 "allocating special buffers %d-%d at %llx+%x " 265 "(virt %p phys %llx)\n", buffer->index, 266 buffer->index + buffer->entries - 1, 267 (u64)buffer->dma_addr, len, 268 buffer->addr, (u64)virt_to_phys(buffer->addr)); 269 270 return 0; 271 } 272 273 static void 274 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 275 { 276 if (!buffer->addr) 277 return; 278 279 netif_dbg(efx, hw, efx->net_dev, 280 "deallocating special buffers %d-%d at %llx+%x " 281 "(virt %p phys %llx)\n", buffer->index, 282 buffer->index + buffer->entries - 1, 283 (u64)buffer->dma_addr, buffer->len, 284 buffer->addr, (u64)virt_to_phys(buffer->addr)); 285 286 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, 287 buffer->dma_addr); 288 buffer->addr = NULL; 289 buffer->entries = 0; 290 } 291 292 /************************************************************************** 293 * 294 * Generic buffer handling 295 * These buffers are used for interrupt status and MAC stats 296 * 297 **************************************************************************/ 298 299 int 
efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 300 unsigned int len) 301 { 302 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 303 &buffer->dma_addr); 304 if (!buffer->addr) 305 return -ENOMEM; 306 buffer->len = len; 307 memset(buffer->addr, 0, len); 308 return 0; 309 } 310 311 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) 312 { 313 if (buffer->addr) { 314 pci_free_consistent(efx->pci_dev, buffer->len, 315 buffer->addr, buffer->dma_addr); 316 buffer->addr = NULL; 317 } 318 } 319 320 /************************************************************************** 321 * 322 * TX path 323 * 324 **************************************************************************/ 325 326 /* Returns a pointer to the specified transmit descriptor in the TX 327 * descriptor queue belonging to the specified channel. 328 */ 329 static inline efx_qword_t * 330 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 331 { 332 return ((efx_qword_t *) (tx_queue->txd.addr)) + index; 333 } 334 335 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 336 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) 337 { 338 unsigned write_ptr; 339 efx_dword_t reg; 340 341 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 342 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 343 efx_writed_page(tx_queue->efx, ®, 344 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 345 } 346 347 /* Write pointer and first descriptor for TX descriptor ring */ 348 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, 349 const efx_qword_t *txd) 350 { 351 unsigned write_ptr; 352 efx_oword_t reg; 353 354 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); 355 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); 356 357 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 358 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, 359 FRF_AZ_TX_DESC_WPTR, write_ptr); 360 reg.qword[0] = *txd; 361 efx_writeo_page(tx_queue->efx, ®, 362 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); 363 } 364 365 static inline bool 366 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) 367 { 368 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); 369 370 if (empty_read_count == 0) 371 return false; 372 373 tx_queue->empty_read_count = 0; 374 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 375 } 376 377 /* For each entry inserted into the software descriptor ring, create a 378 * descriptor in the hardware TX descriptor ring (in host memory), and 379 * write a doorbell. 
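 *
 * Ring slots are derived from free-running counters masked down to the
 * ring size: with a 512-entry ring, ptr_mask is 511 and write_count 513
 * selects slot 1, so the counters never need explicit wrap handling.  If
 * the completion path last saw the queue empty and nothing has been
 * queued since (efx_may_push_tx_desc()), the first new descriptor is
 * written into the doorbell register together with the write pointer
 * (efx_push_tx_desc()) so the NIC need not fetch it from host memory;
 * otherwise only the write pointer is updated (efx_notify_tx_desc()).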
380 */ 381 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) 382 { 383 384 struct efx_tx_buffer *buffer; 385 efx_qword_t *txd; 386 unsigned write_ptr; 387 unsigned old_write_count = tx_queue->write_count; 388 389 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 390 391 do { 392 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 393 buffer = &tx_queue->buffer[write_ptr]; 394 txd = efx_tx_desc(tx_queue, write_ptr); 395 ++tx_queue->write_count; 396 397 /* Create TX descriptor ring entry */ 398 EFX_POPULATE_QWORD_4(*txd, 399 FSF_AZ_TX_KER_CONT, buffer->continuation, 400 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 401 FSF_AZ_TX_KER_BUF_REGION, 0, 402 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 403 } while (tx_queue->write_count != tx_queue->insert_count); 404 405 wmb(); /* Ensure descriptors are written before they are fetched */ 406 407 if (efx_may_push_tx_desc(tx_queue, old_write_count)) { 408 txd = efx_tx_desc(tx_queue, 409 old_write_count & tx_queue->ptr_mask); 410 efx_push_tx_desc(tx_queue, txd); 411 ++tx_queue->pushes; 412 } else { 413 efx_notify_tx_desc(tx_queue); 414 } 415 } 416 417 /* Allocate hardware resources for a TX queue */ 418 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 419 { 420 struct efx_nic *efx = tx_queue->efx; 421 unsigned entries; 422 423 entries = tx_queue->ptr_mask + 1; 424 return efx_alloc_special_buffer(efx, &tx_queue->txd, 425 entries * sizeof(efx_qword_t)); 426 } 427 428 void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 429 { 430 struct efx_nic *efx = tx_queue->efx; 431 efx_oword_t reg; 432 433 tx_queue->flushed = FLUSH_NONE; 434 435 /* Pin TX descriptor ring */ 436 efx_init_special_buffer(efx, &tx_queue->txd); 437 438 /* Push TX descriptor ring to card */ 439 EFX_POPULATE_OWORD_10(reg, 440 FRF_AZ_TX_DESCQ_EN, 1, 441 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 442 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 443 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 444 FRF_AZ_TX_DESCQ_EVQ_ID, 445 tx_queue->channel->channel, 446 FRF_AZ_TX_DESCQ_OWNER_ID, 0, 447 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, 448 FRF_AZ_TX_DESCQ_SIZE, 449 __ffs(tx_queue->txd.entries), 450 FRF_AZ_TX_DESCQ_TYPE, 0, 451 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 452 453 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 454 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 455 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 456 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, 457 !csum); 458 } 459 460 efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, 461 tx_queue->queue); 462 463 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 464 /* Only 128 bits in this register */ 465 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 466 467 efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); 468 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 469 clear_bit_le(tx_queue->queue, (void *)®); 470 else 471 set_bit_le(tx_queue->queue, (void *)®); 472 efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); 473 } 474 475 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 476 EFX_POPULATE_OWORD_1(reg, 477 FRF_BZ_TX_PACE, 478 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 
479 FFE_BZ_TX_PACE_OFF : 480 FFE_BZ_TX_PACE_RESERVED); 481 efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, 482 tx_queue->queue); 483 } 484 } 485 486 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 487 { 488 struct efx_nic *efx = tx_queue->efx; 489 efx_oword_t tx_flush_descq; 490 491 tx_queue->flushed = FLUSH_PENDING; 492 493 /* Post a flush command */ 494 EFX_POPULATE_OWORD_2(tx_flush_descq, 495 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 496 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 497 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 498 } 499 500 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) 501 { 502 struct efx_nic *efx = tx_queue->efx; 503 efx_oword_t tx_desc_ptr; 504 505 /* The queue should have been flushed */ 506 WARN_ON(tx_queue->flushed != FLUSH_DONE); 507 508 /* Remove TX descriptor ring from card */ 509 EFX_ZERO_OWORD(tx_desc_ptr); 510 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 511 tx_queue->queue); 512 513 /* Unpin TX descriptor ring */ 514 efx_fini_special_buffer(efx, &tx_queue->txd); 515 } 516 517 /* Free buffers backing TX queue */ 518 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) 519 { 520 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 521 } 522 523 /************************************************************************** 524 * 525 * RX path 526 * 527 **************************************************************************/ 528 529 /* Returns a pointer to the specified descriptor in the RX descriptor queue */ 530 static inline efx_qword_t * 531 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 532 { 533 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; 534 } 535 536 /* This creates an entry in the RX descriptor queue */ 537 static inline void 538 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 539 { 540 struct efx_rx_buffer *rx_buf; 541 efx_qword_t *rxd; 542 543 rxd = efx_rx_desc(rx_queue, index); 544 rx_buf = efx_rx_buffer(rx_queue, index); 545 EFX_POPULATE_QWORD_3(*rxd, 546 FSF_AZ_RX_KER_BUF_SIZE, 547 rx_buf->len - 548 rx_queue->efx->type->rx_buffer_padding, 549 FSF_AZ_RX_KER_BUF_REGION, 0, 550 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 551 } 552 553 /* This writes to the RX_DESC_WPTR register for the specified receive 554 * descriptor ring. 
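 *
 * Descriptors are first built for every buffer added since the last
 * notification, i.e. ring slots notified_count .. added_count - 1 (each
 * masked by ptr_mask), then a write barrier orders them ahead of the
 * doorbell.  For example, with ptr_mask = 1023, notified_count = 1024 and
 * added_count = 1030, six descriptors are written at slots 0..5 and the
 * write pointer pushed to the NIC is 1030 & 1023 = 6.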
555 */ 556 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 557 { 558 struct efx_nic *efx = rx_queue->efx; 559 efx_dword_t reg; 560 unsigned write_ptr; 561 562 while (rx_queue->notified_count != rx_queue->added_count) { 563 efx_build_rx_desc( 564 rx_queue, 565 rx_queue->notified_count & rx_queue->ptr_mask); 566 ++rx_queue->notified_count; 567 } 568 569 wmb(); 570 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 571 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 572 efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, 573 efx_rx_queue_index(rx_queue)); 574 } 575 576 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 577 { 578 struct efx_nic *efx = rx_queue->efx; 579 unsigned entries; 580 581 entries = rx_queue->ptr_mask + 1; 582 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 583 entries * sizeof(efx_qword_t)); 584 } 585 586 void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 587 { 588 efx_oword_t rx_desc_ptr; 589 struct efx_nic *efx = rx_queue->efx; 590 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 591 bool iscsi_digest_en = is_b0; 592 593 netif_dbg(efx, hw, efx->net_dev, 594 "RX queue %d ring in special buffers %d-%d\n", 595 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 596 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 597 598 rx_queue->flushed = FLUSH_NONE; 599 600 /* Pin RX descriptor ring */ 601 efx_init_special_buffer(efx, &rx_queue->rxd); 602 603 /* Push RX descriptor ring to card */ 604 EFX_POPULATE_OWORD_10(rx_desc_ptr, 605 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, 606 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 607 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 608 FRF_AZ_RX_DESCQ_EVQ_ID, 609 efx_rx_queue_channel(rx_queue)->channel, 610 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 611 FRF_AZ_RX_DESCQ_LABEL, 612 efx_rx_queue_index(rx_queue), 613 FRF_AZ_RX_DESCQ_SIZE, 614 __ffs(rx_queue->rxd.entries), 615 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 616 /* For >=B0 this is scatter so disable */ 617 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 618 FRF_AZ_RX_DESCQ_EN, 1); 619 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 620 efx_rx_queue_index(rx_queue)); 621 } 622 623 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 624 { 625 struct efx_nic *efx = rx_queue->efx; 626 efx_oword_t rx_flush_descq; 627 628 rx_queue->flushed = FLUSH_PENDING; 629 630 /* Post a flush command */ 631 EFX_POPULATE_OWORD_2(rx_flush_descq, 632 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 633 FRF_AZ_RX_FLUSH_DESCQ, 634 efx_rx_queue_index(rx_queue)); 635 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 636 } 637 638 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) 639 { 640 efx_oword_t rx_desc_ptr; 641 struct efx_nic *efx = rx_queue->efx; 642 643 /* The queue should already have been flushed */ 644 WARN_ON(rx_queue->flushed != FLUSH_DONE); 645 646 /* Remove RX descriptor ring from card */ 647 EFX_ZERO_OWORD(rx_desc_ptr); 648 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 649 efx_rx_queue_index(rx_queue)); 650 651 /* Unpin RX descriptor ring */ 652 efx_fini_special_buffer(efx, &rx_queue->rxd); 653 } 654 655 /* Free buffers backing RX queue */ 656 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) 657 { 658 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 659 } 660 661 /************************************************************************** 662 * 663 * Event queue processing 664 * Event queues are processed by per-channel tasklets. 
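 *
 * Each event consumed by efx_nic_process_eventq() is overwritten with
 * all-ones so that it reads back as "not present" on the next pass, and
 * the advanced read pointer is only written to the EVQ_RPTR register
 * (efx_nic_eventq_read_ack()) once the channel finishes a poll, which
 * also re-arms event notification for that queue.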
665 * 666 **************************************************************************/ 667 668 /* Update a channel's event queue's read pointer (RPTR) register 669 * 670 * This writes the EVQ_RPTR_REG register for the specified channel's 671 * event queue. 672 */ 673 void efx_nic_eventq_read_ack(struct efx_channel *channel) 674 { 675 efx_dword_t reg; 676 struct efx_nic *efx = channel->efx; 677 678 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 679 channel->eventq_read_ptr & channel->eventq_mask); 680 efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, 681 channel->channel); 682 } 683 684 /* Use HW to insert a SW defined event */ 685 static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 686 { 687 efx_oword_t drv_ev_reg; 688 689 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 690 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 691 drv_ev_reg.u32[0] = event->u32[0]; 692 drv_ev_reg.u32[1] = event->u32[1]; 693 drv_ev_reg.u32[2] = 0; 694 drv_ev_reg.u32[3] = 0; 695 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); 696 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); 697 } 698 699 /* Handle a transmit completion event 700 * 701 * The NIC batches TX completion events; the message we receive is of 702 * the form "complete all TX events up to this index". 703 */ 704 static int 705 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 706 { 707 unsigned int tx_ev_desc_ptr; 708 unsigned int tx_ev_q_label; 709 struct efx_tx_queue *tx_queue; 710 struct efx_nic *efx = channel->efx; 711 int tx_packets = 0; 712 713 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 714 /* Transmit completion */ 715 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 716 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 717 tx_queue = efx_channel_get_tx_queue( 718 channel, tx_ev_q_label % EFX_TXQ_TYPES); 719 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 720 tx_queue->ptr_mask); 721 channel->irq_mod_score += tx_packets; 722 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 723 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 724 /* Rewrite the FIFO write pointer */ 725 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 726 tx_queue = efx_channel_get_tx_queue( 727 channel, tx_ev_q_label % EFX_TXQ_TYPES); 728 729 netif_tx_lock(efx->net_dev); 730 efx_notify_tx_desc(tx_queue); 731 netif_tx_unlock(efx->net_dev); 732 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 733 EFX_WORKAROUND_10727(efx)) { 734 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 735 } else { 736 netif_err(efx, tx_err, efx->net_dev, 737 "channel %d unexpected TX event " 738 EFX_QWORD_FMT"\n", channel->channel, 739 EFX_QWORD_VAL(*event)); 740 } 741 742 return tx_packets; 743 } 744 745 /* Detect errors included in the rx_evt_pkt_ok bit. 
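 *
 * This runs only when the RX_EV_PKT_OK summary bit is clear; it decodes
 * the individual cause bits, bumps the per-channel counters that the MAC
 * statistics do not cover (frame truncation, to-be-discard, and, outside
 * loopback self-test, IP header and TCP/UDP checksum errors), and tells
 * the caller whether the frame must be dropped, roughly
 *
 *	discard = crc_err | frm_trunc | drib_nib | tobe_disc | pause_frm;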
*/ 746 static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 747 const efx_qword_t *event, 748 bool *rx_ev_pkt_ok, 749 bool *discard) 750 { 751 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 752 struct efx_nic *efx = rx_queue->efx; 753 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 754 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 755 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 756 bool rx_ev_other_err, rx_ev_pause_frm; 757 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 758 unsigned rx_ev_pkt_type; 759 760 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 761 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 762 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 763 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 764 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 765 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 766 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 767 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 768 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 769 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 770 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 771 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 772 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 773 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 774 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 775 776 /* Every error apart from tobe_disc and pause_frm */ 777 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 778 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 779 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 780 781 /* Count errors that are not in MAC stats. Ignore expected 782 * checksum errors during self-test. */ 783 if (rx_ev_frm_trunc) 784 ++channel->n_rx_frm_trunc; 785 else if (rx_ev_tobe_disc) 786 ++channel->n_rx_tobe_disc; 787 else if (!efx->loopback_selftest) { 788 if (rx_ev_ip_hdr_chksum_err) 789 ++channel->n_rx_ip_hdr_chksum_err; 790 else if (rx_ev_tcp_udp_chksum_err) 791 ++channel->n_rx_tcp_udp_chksum_err; 792 } 793 794 /* The frame must be discarded if any of these are true. */ 795 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 796 rx_ev_tobe_disc | rx_ev_pause_frm); 797 798 /* TOBE_DISC is expected on unicast mismatches; don't print out an 799 * error message. FRM_TRUNC indicates RXDP dropped the packet due 800 * to a FIFO overflow. 801 */ 802 #ifdef DEBUG 803 if (rx_ev_other_err && net_ratelimit()) { 804 netif_dbg(efx, rx_err, efx->net_dev, 805 " RX queue %d unexpected RX event " 806 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 807 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 808 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 809 rx_ev_ip_hdr_chksum_err ? 810 " [IP_HDR_CHKSUM_ERR]" : "", 811 rx_ev_tcp_udp_chksum_err ? 812 " [TCP_UDP_CHKSUM_ERR]" : "", 813 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 814 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 815 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 816 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 817 rx_ev_pause_frm ? " [PAUSE]" : ""); 818 } 819 #endif 820 } 821 822 /* Handle receive events that are not in-order. 
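 *
 * The number of missing events is computed modulo the ring size,
 * dropped = (index - expected) & ptr_mask, so it stays correct across
 * wrap-around: for example, with ptr_mask = 1023, expected = 1020 and
 * index = 4, (4 - 1020) & 1023 = 8 descriptors were skipped before the
 * recovery reset is scheduled.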
*/ 823 static void 824 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 825 { 826 struct efx_nic *efx = rx_queue->efx; 827 unsigned expected, dropped; 828 829 expected = rx_queue->removed_count & rx_queue->ptr_mask; 830 dropped = (index - expected) & rx_queue->ptr_mask; 831 netif_info(efx, rx_err, efx->net_dev, 832 "dropped %d events (index=%d expected=%d)\n", 833 dropped, index, expected); 834 835 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 836 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 837 } 838 839 /* Handle a packet received event 840 * 841 * The NIC gives a "discard" flag if it's a unicast packet with the 842 * wrong destination address 843 * Also "is multicast" and "matches multicast filter" flags can be used to 844 * discard non-matching multicast packets. 845 */ 846 static void 847 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 848 { 849 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 850 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 851 unsigned expected_ptr; 852 bool rx_ev_pkt_ok, discard = false, checksummed; 853 struct efx_rx_queue *rx_queue; 854 855 /* Basic packet information */ 856 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 857 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 858 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 859 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); 860 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); 861 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 862 channel->channel); 863 864 rx_queue = efx_channel_get_rx_queue(channel); 865 866 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 867 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 868 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 869 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 870 871 if (likely(rx_ev_pkt_ok)) { 872 /* If packet is marked as OK and packet type is TCP/IP or 873 * UDP/IP, then we can rely on the hardware checksum. 874 */ 875 checksummed = 876 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 877 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; 878 } else { 879 efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); 880 checksummed = false; 881 } 882 883 /* Detect multicast packets that didn't match the filter */ 884 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 885 if (rx_ev_mcast_pkt) { 886 unsigned int rx_ev_mcast_hash_match = 887 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 888 889 if (unlikely(!rx_ev_mcast_hash_match)) { 890 ++channel->n_rx_mcast_mismatch; 891 discard = true; 892 } 893 } 894 895 channel->irq_mod_score += 2; 896 897 /* Handle received packet */ 898 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, 899 checksummed, discard); 900 } 901 902 static void 903 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 904 { 905 struct efx_nic *efx = channel->efx; 906 unsigned code; 907 908 code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 909 if (code == EFX_CHANNEL_MAGIC_TEST(channel)) 910 ; /* ignore */ 911 else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) 912 /* The queue must be empty, so we won't receive any rx 913 * events, so efx_process_channel() won't refill the 914 * queue. 
Refill it here */ 915 efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); 916 else 917 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 918 "generated event "EFX_QWORD_FMT"\n", 919 channel->channel, EFX_QWORD_VAL(*event)); 920 } 921 922 static void 923 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 924 { 925 struct efx_nic *efx = channel->efx; 926 unsigned int ev_sub_code; 927 unsigned int ev_sub_data; 928 929 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 930 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 931 932 switch (ev_sub_code) { 933 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 934 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 935 channel->channel, ev_sub_data); 936 break; 937 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 938 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 939 channel->channel, ev_sub_data); 940 break; 941 case FSE_AZ_EVQ_INIT_DONE_EV: 942 netif_dbg(efx, hw, efx->net_dev, 943 "channel %d EVQ %d initialised\n", 944 channel->channel, ev_sub_data); 945 break; 946 case FSE_AZ_SRM_UPD_DONE_EV: 947 netif_vdbg(efx, hw, efx->net_dev, 948 "channel %d SRAM update done\n", channel->channel); 949 break; 950 case FSE_AZ_WAKE_UP_EV: 951 netif_vdbg(efx, hw, efx->net_dev, 952 "channel %d RXQ %d wakeup event\n", 953 channel->channel, ev_sub_data); 954 break; 955 case FSE_AZ_TIMER_EV: 956 netif_vdbg(efx, hw, efx->net_dev, 957 "channel %d RX queue %d timer expired\n", 958 channel->channel, ev_sub_data); 959 break; 960 case FSE_AA_RX_RECOVER_EV: 961 netif_err(efx, rx_err, efx->net_dev, 962 "channel %d seen DRIVER RX_RESET event. " 963 "Resetting.\n", channel->channel); 964 atomic_inc(&efx->rx_reset); 965 efx_schedule_reset(efx, 966 EFX_WORKAROUND_6555(efx) ? 967 RESET_TYPE_RX_RECOVERY : 968 RESET_TYPE_DISABLE); 969 break; 970 case FSE_BZ_RX_DSC_ERROR_EV: 971 netif_err(efx, rx_err, efx->net_dev, 972 "RX DMA Q %d reports descriptor fetch error." 973 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 974 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 975 break; 976 case FSE_BZ_TX_DSC_ERROR_EV: 977 netif_err(efx, tx_err, efx->net_dev, 978 "TX DMA Q %d reports descriptor fetch error." 
979 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 980 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 981 break; 982 default: 983 netif_vdbg(efx, hw, efx->net_dev, 984 "channel %d unknown driver event code %d " 985 "data %04x\n", channel->channel, ev_sub_code, 986 ev_sub_data); 987 break; 988 } 989 } 990 991 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 992 { 993 struct efx_nic *efx = channel->efx; 994 unsigned int read_ptr; 995 efx_qword_t event, *p_event; 996 int ev_code; 997 int tx_packets = 0; 998 int spent = 0; 999 1000 read_ptr = channel->eventq_read_ptr; 1001 1002 for (;;) { 1003 p_event = efx_event(channel, read_ptr); 1004 event = *p_event; 1005 1006 if (!efx_event_present(&event)) 1007 /* End of events */ 1008 break; 1009 1010 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1011 "channel %d event is "EFX_QWORD_FMT"\n", 1012 channel->channel, EFX_QWORD_VAL(event)); 1013 1014 /* Clear this event by marking it all ones */ 1015 EFX_SET_QWORD(*p_event); 1016 1017 ++read_ptr; 1018 1019 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1020 1021 switch (ev_code) { 1022 case FSE_AZ_EV_CODE_RX_EV: 1023 efx_handle_rx_event(channel, &event); 1024 if (++spent == budget) 1025 goto out; 1026 break; 1027 case FSE_AZ_EV_CODE_TX_EV: 1028 tx_packets += efx_handle_tx_event(channel, &event); 1029 if (tx_packets > efx->txq_entries) { 1030 spent = budget; 1031 goto out; 1032 } 1033 break; 1034 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1035 efx_handle_generated_event(channel, &event); 1036 break; 1037 case FSE_AZ_EV_CODE_DRIVER_EV: 1038 efx_handle_driver_event(channel, &event); 1039 break; 1040 case FSE_CZ_EV_CODE_MCDI_EV: 1041 efx_mcdi_process_event(channel, &event); 1042 break; 1043 case FSE_AZ_EV_CODE_GLOBAL_EV: 1044 if (efx->type->handle_global_event && 1045 efx->type->handle_global_event(channel, &event)) 1046 break; 1047 /* else fall through */ 1048 default: 1049 netif_err(channel->efx, hw, channel->efx->net_dev, 1050 "channel %d unknown event type %d (data " 1051 EFX_QWORD_FMT ")\n", channel->channel, 1052 ev_code, EFX_QWORD_VAL(event)); 1053 } 1054 } 1055 1056 out: 1057 channel->eventq_read_ptr = read_ptr; 1058 return spent; 1059 } 1060 1061 /* Check whether an event is present in the eventq at the current 1062 * read pointer. Only useful for self-test. 
1063 */ 1064 bool efx_nic_event_present(struct efx_channel *channel) 1065 { 1066 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1067 } 1068 1069 /* Allocate buffer table entries for event queue */ 1070 int efx_nic_probe_eventq(struct efx_channel *channel) 1071 { 1072 struct efx_nic *efx = channel->efx; 1073 unsigned entries; 1074 1075 entries = channel->eventq_mask + 1; 1076 return efx_alloc_special_buffer(efx, &channel->eventq, 1077 entries * sizeof(efx_qword_t)); 1078 } 1079 1080 void efx_nic_init_eventq(struct efx_channel *channel) 1081 { 1082 efx_oword_t reg; 1083 struct efx_nic *efx = channel->efx; 1084 1085 netif_dbg(efx, hw, efx->net_dev, 1086 "channel %d event queue in special buffers %d-%d\n", 1087 channel->channel, channel->eventq.index, 1088 channel->eventq.index + channel->eventq.entries - 1); 1089 1090 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1091 EFX_POPULATE_OWORD_3(reg, 1092 FRF_CZ_TIMER_Q_EN, 1, 1093 FRF_CZ_HOST_NOTIFY_MODE, 0, 1094 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1095 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1096 } 1097 1098 /* Pin event queue buffer */ 1099 efx_init_special_buffer(efx, &channel->eventq); 1100 1101 /* Fill event queue with all ones (i.e. empty events) */ 1102 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1103 1104 /* Push event queue to card */ 1105 EFX_POPULATE_OWORD_3(reg, 1106 FRF_AZ_EVQ_EN, 1, 1107 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1108 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1109 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1110 channel->channel); 1111 1112 efx->type->push_irq_moderation(channel); 1113 } 1114 1115 void efx_nic_fini_eventq(struct efx_channel *channel) 1116 { 1117 efx_oword_t reg; 1118 struct efx_nic *efx = channel->efx; 1119 1120 /* Remove event queue from card */ 1121 EFX_ZERO_OWORD(reg); 1122 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1123 channel->channel); 1124 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1125 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1126 1127 /* Unpin event queue */ 1128 efx_fini_special_buffer(efx, &channel->eventq); 1129 } 1130 1131 /* Free buffers backing event queue */ 1132 void efx_nic_remove_eventq(struct efx_channel *channel) 1133 { 1134 efx_free_special_buffer(channel->efx, &channel->eventq); 1135 } 1136 1137 1138 void efx_nic_generate_test_event(struct efx_channel *channel) 1139 { 1140 unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); 1141 efx_qword_t test_event; 1142 1143 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, 1144 FSE_AZ_EV_CODE_DRV_GEN_EV, 1145 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 1146 efx_generate_event(channel, &test_event); 1147 } 1148 1149 void efx_nic_generate_fill_event(struct efx_channel *channel) 1150 { 1151 unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); 1152 efx_qword_t test_event; 1153 1154 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, 1155 FSE_AZ_EV_CODE_DRV_GEN_EV, 1156 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 1157 efx_generate_event(channel, &test_event); 1158 } 1159 1160 /************************************************************************** 1161 * 1162 * Flush handling 1163 * 1164 **************************************************************************/ 1165 1166 1167 static void efx_poll_flush_events(struct efx_nic *efx) 1168 { 1169 struct efx_channel *channel = efx_get_channel(efx, 0); 1170 struct efx_tx_queue *tx_queue; 1171 struct efx_rx_queue *rx_queue; 1172 unsigned int read_ptr = channel->eventq_read_ptr; 1173 unsigned int end_ptr = 
read_ptr + channel->eventq_mask - 1; 1174 1175 do { 1176 efx_qword_t *event = efx_event(channel, read_ptr); 1177 int ev_code, ev_sub_code, ev_queue; 1178 bool ev_failed; 1179 1180 if (!efx_event_present(event)) 1181 break; 1182 1183 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); 1184 ev_sub_code = EFX_QWORD_FIELD(*event, 1185 FSF_AZ_DRIVER_EV_SUBCODE); 1186 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1187 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { 1188 ev_queue = EFX_QWORD_FIELD(*event, 1189 FSF_AZ_DRIVER_EV_SUBDATA); 1190 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { 1191 tx_queue = efx_get_tx_queue( 1192 efx, ev_queue / EFX_TXQ_TYPES, 1193 ev_queue % EFX_TXQ_TYPES); 1194 tx_queue->flushed = FLUSH_DONE; 1195 } 1196 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && 1197 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { 1198 ev_queue = EFX_QWORD_FIELD( 1199 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1200 ev_failed = EFX_QWORD_FIELD( 1201 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1202 if (ev_queue < efx->n_rx_channels) { 1203 rx_queue = efx_get_rx_queue(efx, ev_queue); 1204 rx_queue->flushed = 1205 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1206 } 1207 } 1208 1209 /* We're about to destroy the queue anyway, so 1210 * it's ok to throw away every non-flush event */ 1211 EFX_SET_QWORD(*event); 1212 1213 ++read_ptr; 1214 } while (read_ptr != end_ptr); 1215 1216 channel->eventq_read_ptr = read_ptr; 1217 } 1218 1219 /* Handle tx and rx flushes at the same time, since they run in 1220 * parallel in the hardware and there's no reason for us to 1221 * serialise them */ 1222 int efx_nic_flush_queues(struct efx_nic *efx) 1223 { 1224 struct efx_channel *channel; 1225 struct efx_rx_queue *rx_queue; 1226 struct efx_tx_queue *tx_queue; 1227 int i, tx_pending, rx_pending; 1228 1229 /* If necessary prepare the hardware for flushing */ 1230 efx->type->prepare_flush(efx); 1231 1232 /* Flush all tx queues in parallel */ 1233 efx_for_each_channel(channel, efx) { 1234 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1235 if (tx_queue->initialised) 1236 efx_flush_tx_queue(tx_queue); 1237 } 1238 } 1239 1240 /* The hardware supports four concurrent rx flushes, each of which may 1241 * need to be retried if there is an outstanding descriptor fetch */ 1242 for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { 1243 rx_pending = tx_pending = 0; 1244 efx_for_each_channel(channel, efx) { 1245 efx_for_each_channel_rx_queue(rx_queue, channel) { 1246 if (rx_queue->flushed == FLUSH_PENDING) 1247 ++rx_pending; 1248 } 1249 } 1250 efx_for_each_channel(channel, efx) { 1251 efx_for_each_channel_rx_queue(rx_queue, channel) { 1252 if (rx_pending == EFX_RX_FLUSH_COUNT) 1253 break; 1254 if (rx_queue->flushed == FLUSH_FAILED || 1255 rx_queue->flushed == FLUSH_NONE) { 1256 efx_flush_rx_queue(rx_queue); 1257 ++rx_pending; 1258 } 1259 } 1260 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1261 if (tx_queue->initialised && 1262 tx_queue->flushed != FLUSH_DONE) 1263 ++tx_pending; 1264 } 1265 } 1266 1267 if (rx_pending == 0 && tx_pending == 0) 1268 return 0; 1269 1270 msleep(EFX_FLUSH_INTERVAL); 1271 efx_poll_flush_events(efx); 1272 } 1273 1274 /* Mark the queues as all flushed. 
We're going to return failure 1275 * leading to a reset, or fake up success anyway */ 1276 efx_for_each_channel(channel, efx) { 1277 efx_for_each_possible_channel_tx_queue(tx_queue, channel) { 1278 if (tx_queue->initialised && 1279 tx_queue->flushed != FLUSH_DONE) 1280 netif_err(efx, hw, efx->net_dev, 1281 "tx queue %d flush command timed out\n", 1282 tx_queue->queue); 1283 tx_queue->flushed = FLUSH_DONE; 1284 } 1285 efx_for_each_channel_rx_queue(rx_queue, channel) { 1286 if (rx_queue->flushed != FLUSH_DONE) 1287 netif_err(efx, hw, efx->net_dev, 1288 "rx queue %d flush command timed out\n", 1289 efx_rx_queue_index(rx_queue)); 1290 rx_queue->flushed = FLUSH_DONE; 1291 } 1292 } 1293 1294 return -ETIMEDOUT; 1295 } 1296 1297 /************************************************************************** 1298 * 1299 * Hardware interrupts 1300 * The hardware interrupt handler does very little work; all the event 1301 * queue processing is carried out by per-channel tasklets. 1302 * 1303 **************************************************************************/ 1304 1305 /* Enable/disable/generate interrupts */ 1306 static inline void efx_nic_interrupts(struct efx_nic *efx, 1307 bool enabled, bool force) 1308 { 1309 efx_oword_t int_en_reg_ker; 1310 1311 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1312 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1313 FRF_AZ_KER_INT_KER, force, 1314 FRF_AZ_DRV_INT_EN_KER, enabled); 1315 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1316 } 1317 1318 void efx_nic_enable_interrupts(struct efx_nic *efx) 1319 { 1320 struct efx_channel *channel; 1321 1322 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1323 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1324 1325 /* Enable interrupts */ 1326 efx_nic_interrupts(efx, true, false); 1327 1328 /* Force processing of all the channels to get the EVQ RPTRs up to 1329 date */ 1330 efx_for_each_channel(channel, efx) 1331 efx_schedule_channel(channel); 1332 } 1333 1334 void efx_nic_disable_interrupts(struct efx_nic *efx) 1335 { 1336 /* Disable interrupts */ 1337 efx_nic_interrupts(efx, false, false); 1338 } 1339 1340 /* Generate a test interrupt 1341 * Interrupt must already have been enabled, otherwise nasty things 1342 * may happen. 1343 */ 1344 void efx_nic_generate_interrupt(struct efx_nic *efx) 1345 { 1346 efx_nic_interrupts(efx, true, true); 1347 } 1348 1349 /* Process a fatal interrupt 1350 * Disable bus mastering ASAP and schedule a reset 1351 */ 1352 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1353 { 1354 struct falcon_nic_data *nic_data = efx->nic_data; 1355 efx_oword_t *int_ker = efx->irq_status.addr; 1356 efx_oword_t fatal_intr; 1357 int error, mem_perr; 1358 1359 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1360 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1361 1362 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1363 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1364 EFX_OWORD_VAL(fatal_intr), 1365 error ? 
"disabling bus mastering" : "no recognised error"); 1366 1367 /* If this is a memory parity error dump which blocks are offending */ 1368 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1369 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1370 if (mem_perr) { 1371 efx_oword_t reg; 1372 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1373 netif_err(efx, hw, efx->net_dev, 1374 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1375 EFX_OWORD_VAL(reg)); 1376 } 1377 1378 /* Disable both devices */ 1379 pci_clear_master(efx->pci_dev); 1380 if (efx_nic_is_dual_func(efx)) 1381 pci_clear_master(nic_data->pci_dev2); 1382 efx_nic_disable_interrupts(efx); 1383 1384 /* Count errors and reset or disable the NIC accordingly */ 1385 if (efx->int_error_count == 0 || 1386 time_after(jiffies, efx->int_error_expire)) { 1387 efx->int_error_count = 0; 1388 efx->int_error_expire = 1389 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1390 } 1391 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1392 netif_err(efx, hw, efx->net_dev, 1393 "SYSTEM ERROR - reset scheduled\n"); 1394 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1395 } else { 1396 netif_err(efx, hw, efx->net_dev, 1397 "SYSTEM ERROR - max number of errors seen." 1398 "NIC will be disabled\n"); 1399 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1400 } 1401 1402 return IRQ_HANDLED; 1403 } 1404 1405 /* Handle a legacy interrupt 1406 * Acknowledges the interrupt and schedule event queue processing. 1407 */ 1408 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1409 { 1410 struct efx_nic *efx = dev_id; 1411 efx_oword_t *int_ker = efx->irq_status.addr; 1412 irqreturn_t result = IRQ_NONE; 1413 struct efx_channel *channel; 1414 efx_dword_t reg; 1415 u32 queues; 1416 int syserr; 1417 1418 /* Could this be ours? If interrupts are disabled then the 1419 * channel state may not be valid. 1420 */ 1421 if (!efx->legacy_irq_enabled) 1422 return result; 1423 1424 /* Read the ISR which also ACKs the interrupts */ 1425 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1426 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1427 1428 /* Handle non-event-queue sources */ 1429 if (queues & (1U << efx->irq_level)) { 1430 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1431 if (unlikely(syserr)) 1432 return efx_nic_fatal_interrupt(efx); 1433 efx->last_irq_cpu = raw_smp_processor_id(); 1434 } 1435 1436 if (queues != 0) { 1437 if (EFX_WORKAROUND_15783(efx)) 1438 efx->irq_zero_count = 0; 1439 1440 /* Schedule processing of any interrupting queues */ 1441 efx_for_each_channel(channel, efx) { 1442 if (queues & 1) 1443 efx_schedule_channel_irq(channel); 1444 queues >>= 1; 1445 } 1446 result = IRQ_HANDLED; 1447 1448 } else if (EFX_WORKAROUND_15783(efx)) { 1449 efx_qword_t *event; 1450 1451 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1452 * because this might be a shared interrupt. */ 1453 if (efx->irq_zero_count++ == 0) 1454 result = IRQ_HANDLED; 1455 1456 /* Ensure we schedule or rearm all event queues */ 1457 efx_for_each_channel(channel, efx) { 1458 event = efx_event(channel, channel->eventq_read_ptr); 1459 if (efx_event_present(event)) 1460 efx_schedule_channel_irq(channel); 1461 else 1462 efx_nic_eventq_read_ack(channel); 1463 } 1464 } 1465 1466 if (result == IRQ_HANDLED) 1467 netif_vdbg(efx, intr, efx->net_dev, 1468 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1469 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1470 1471 return result; 1472 } 1473 1474 /* Handle an MSI interrupt 1475 * 1476 * Handle an MSI hardware interrupt. 
This routine schedules event 1477 * queue processing. No interrupt acknowledgement cycle is necessary. 1478 * Also, we never need to check that the interrupt is for us, since 1479 * MSI interrupts cannot be shared. 1480 */ 1481 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1482 { 1483 struct efx_channel *channel = *(struct efx_channel **)dev_id; 1484 struct efx_nic *efx = channel->efx; 1485 efx_oword_t *int_ker = efx->irq_status.addr; 1486 int syserr; 1487 1488 netif_vdbg(efx, intr, efx->net_dev, 1489 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1490 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1491 1492 /* Handle non-event-queue sources */ 1493 if (channel->channel == efx->irq_level) { 1494 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1495 if (unlikely(syserr)) 1496 return efx_nic_fatal_interrupt(efx); 1497 efx->last_irq_cpu = raw_smp_processor_id(); 1498 } 1499 1500 /* Schedule processing of the channel */ 1501 efx_schedule_channel_irq(channel); 1502 1503 return IRQ_HANDLED; 1504 } 1505 1506 1507 /* Setup RSS indirection table. 1508 * This maps from the hash value of the packet to RXQ 1509 */ 1510 void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1511 { 1512 size_t i = 0; 1513 efx_dword_t dword; 1514 1515 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1516 return; 1517 1518 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1519 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1520 1521 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1522 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1523 efx->rx_indir_table[i]); 1524 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); 1525 } 1526 } 1527 1528 /* Hook interrupt handler(s) 1529 * Try MSI and then legacy interrupts. 1530 */ 1531 int efx_nic_init_interrupt(struct efx_nic *efx) 1532 { 1533 struct efx_channel *channel; 1534 int rc; 1535 1536 if (!EFX_INT_MODE_USE_MSI(efx)) { 1537 irq_handler_t handler; 1538 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1539 handler = efx_legacy_interrupt; 1540 else 1541 handler = falcon_legacy_interrupt_a1; 1542 1543 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1544 efx->name, efx); 1545 if (rc) { 1546 netif_err(efx, drv, efx->net_dev, 1547 "failed to hook legacy IRQ %d\n", 1548 efx->pci_dev->irq); 1549 goto fail1; 1550 } 1551 return 0; 1552 } 1553 1554 /* Hook MSI or MSI-X interrupt */ 1555 efx_for_each_channel(channel, efx) { 1556 rc = request_irq(channel->irq, efx_msi_interrupt, 1557 IRQF_PROBE_SHARED, /* Not shared */ 1558 efx->channel_name[channel->channel], 1559 &efx->channel[channel->channel]); 1560 if (rc) { 1561 netif_err(efx, drv, efx->net_dev, 1562 "failed to hook IRQ %d\n", channel->irq); 1563 goto fail2; 1564 } 1565 } 1566 1567 return 0; 1568 1569 fail2: 1570 efx_for_each_channel(channel, efx) 1571 free_irq(channel->irq, &efx->channel[channel->channel]); 1572 fail1: 1573 return rc; 1574 } 1575 1576 void efx_nic_fini_interrupt(struct efx_nic *efx) 1577 { 1578 struct efx_channel *channel; 1579 efx_oword_t reg; 1580 1581 /* Disable MSI/MSI-X interrupts */ 1582 efx_for_each_channel(channel, efx) { 1583 if (channel->irq) 1584 free_irq(channel->irq, &efx->channel[channel->channel]); 1585 } 1586 1587 /* ACK legacy interrupt */ 1588 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1589 efx_reado(efx, ®, FR_BZ_INT_ISR0); 1590 else 1591 falcon_irq_ack_a1(efx); 1592 1593 /* Disable legacy interrupt */ 1594 if (efx->legacy_irq) 1595 free_irq(efx->legacy_irq, efx); 1596 } 1597 1598 u32 efx_nic_fpga_ver(struct efx_nic *efx) 1599 { 1600 efx_oword_t altera_build; 1601 
efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1602 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1603 } 1604 1605 void efx_nic_init_common(struct efx_nic *efx) 1606 { 1607 efx_oword_t temp; 1608 1609 /* Set positions of descriptor caches in SRAM. */ 1610 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, 1611 efx->type->tx_dc_base / 8); 1612 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1613 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, 1614 efx->type->rx_dc_base / 8); 1615 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1616 1617 /* Set TX descriptor cache size. */ 1618 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1619 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1620 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1621 1622 /* Set RX descriptor cache size. Set low watermark to size-8, as 1623 * this allows most efficient prefetching. 1624 */ 1625 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1626 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1627 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1628 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1629 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1630 1631 /* Program INT_KER address */ 1632 EFX_POPULATE_OWORD_2(temp, 1633 FRF_AZ_NORM_INT_VEC_DIS_KER, 1634 EFX_INT_MODE_USE_MSI(efx), 1635 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1636 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1637 1638 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1639 /* Use an interrupt level unused by event queues */ 1640 efx->irq_level = 0x1f; 1641 else 1642 /* Use a valid MSI-X vector */ 1643 efx->irq_level = 0; 1644 1645 /* Enable all the genuinely fatal interrupts. (They are still 1646 * masked by the overall interrupt mask, controlled by 1647 * falcon_interrupts()). 1648 * 1649 * Note: All other fatal interrupts are enabled 1650 */ 1651 EFX_POPULATE_OWORD_3(temp, 1652 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1653 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1654 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1655 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1656 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1657 EFX_INVERT_OWORD(temp); 1658 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1659 1660 efx_nic_push_rx_indir_table(efx); 1661 1662 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1663 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
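 *
 * Note that this register is updated read-modify-write: FR_AZ_TX_RESERVED
 * is read back first and only the fields named below are changed, so any
 * remaining fields keep their existing values.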
1664 */ 1665 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1666 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1667 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1668 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1669 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1671 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1673 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1675 /* Disable hardware watchdog which can misfire */ 1676 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1677 /* Squash TX of packets of 16 bytes or less */ 1678 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1679 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1680 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1681 1682 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1683 EFX_POPULATE_OWORD_4(temp, 1684 /* Default values */ 1685 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1686 FRF_BZ_TX_PACE_SB_AF, 0xb, 1687 FRF_BZ_TX_PACE_FB_BASE, 0, 1688 /* Allow large pace values in the 1689 * fast bin. */ 1690 FRF_BZ_TX_PACE_BIN_TH, 1691 FFE_BZ_TX_PACE_RESERVED); 1692 efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1693 } 1694 } 1695 1696 /* Register dump */ 1697 1698 #define REGISTER_REVISION_A 1 1699 #define REGISTER_REVISION_B 2 1700 #define REGISTER_REVISION_C 3 1701 #define REGISTER_REVISION_Z 3 /* latest revision */ 1702 1703 struct efx_nic_reg { 1704 u32 offset:24; 1705 u32 min_revision:2, max_revision:2; 1706 }; 1707 1708 #define REGISTER(name, min_rev, max_rev) { \ 1709 FR_ ## min_rev ## max_rev ## _ ## name, \ 1710 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 1711 } 1712 #define REGISTER_AA(name) REGISTER(name, A, A) 1713 #define REGISTER_AB(name) REGISTER(name, A, B) 1714 #define REGISTER_AZ(name) REGISTER(name, A, Z) 1715 #define REGISTER_BB(name) REGISTER(name, B, B) 1716 #define REGISTER_BZ(name) REGISTER(name, B, Z) 1717 #define REGISTER_CZ(name) REGISTER(name, C, Z) 1718 1719 static const struct efx_nic_reg efx_nic_regs[] = { 1720 REGISTER_AZ(ADR_REGION), 1721 REGISTER_AZ(INT_EN_KER), 1722 REGISTER_BZ(INT_EN_CHAR), 1723 REGISTER_AZ(INT_ADR_KER), 1724 REGISTER_BZ(INT_ADR_CHAR), 1725 /* INT_ACK_KER is WO */ 1726 /* INT_ISR0 is RC */ 1727 REGISTER_AZ(HW_INIT), 1728 REGISTER_CZ(USR_EV_CFG), 1729 REGISTER_AB(EE_SPI_HCMD), 1730 REGISTER_AB(EE_SPI_HADR), 1731 REGISTER_AB(EE_SPI_HDATA), 1732 REGISTER_AB(EE_BASE_PAGE), 1733 REGISTER_AB(EE_VPD_CFG0), 1734 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ 1735 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ 1736 /* PCIE_CORE_INDIRECT is indirect */ 1737 REGISTER_AB(NIC_STAT), 1738 REGISTER_AB(GPIO_CTL), 1739 REGISTER_AB(GLB_CTL), 1740 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ 1741 REGISTER_BZ(DP_CTRL), 1742 REGISTER_AZ(MEM_STAT), 1743 REGISTER_AZ(CS_DEBUG), 1744 REGISTER_AZ(ALTERA_BUILD), 1745 REGISTER_AZ(CSR_SPARE), 1746 REGISTER_AB(PCIE_SD_CTL0123), 1747 REGISTER_AB(PCIE_SD_CTL45), 1748 REGISTER_AB(PCIE_PCS_CTL_STAT), 1749 /* DEBUG_DATA_OUT is not used */ 1750 /* DRV_EV is WO */ 1751 REGISTER_AZ(EVQ_CTL), 1752 REGISTER_AZ(EVQ_CNT1), 1753 REGISTER_AZ(EVQ_CNT2), 1754 REGISTER_AZ(BUF_TBL_CFG), 1755 REGISTER_AZ(SRM_RX_DC_CFG), 1756 REGISTER_AZ(SRM_TX_DC_CFG), 1757 REGISTER_AZ(SRM_CFG), 1758 /* BUF_TBL_UPD is WO */ 1759 REGISTER_AZ(SRM_UPD_EVQ), 1760 REGISTER_AZ(SRAM_PARITY), 1761 REGISTER_AZ(RX_CFG), 1762 
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However, this driver only uses a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
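/* Illustrative only: a minimal sketch of how the register-dump helpers
 * above are typically consumed by the ethtool get_regs hooks.  The wrapper
 * names below are hypothetical and not part of this file; they assume the
 * usual netdev_priv() layout used by this driver's net devices.
 */
#if 0
static int efx_example_ethtool_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Tell ethtool how large a snapshot buffer to allocate */
	return efx_nic_get_regs_len(efx);
}

static void efx_example_ethtool_get_regs(struct net_device *net_dev,
					 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Report the hardware revision so userspace can decode the dump */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}
#endif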