/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, (_channel)->channel)
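
/* Worked example (not in the original driver): for channel 3,
 * EFX_CHANNEL_MAGIC_TEST() yields (0x000101 << 8) | 3 == 0x010103,
 * and _EFX_CHANNEL_MAGIC_CODE(0x010103) == 0x010103 >> 8 == 0x000101,
 * so the code survives the round trip while the low 8 bits carry the
 * channel number.  This is why efx_handle_generated_event() below can
 * compare a received magic value against the per-channel macros
 * directly.
 */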

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
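
/* Worked example (not in the original driver, assuming EFX_BUF_SIZE ==
 * 4096): a 4096-entry TX ring of 8-byte descriptors needs 32KB, which
 * ALIGN() leaves at 32KB and which therefore occupies 32768 / 4096 == 8
 * buffer-table entries; a 512-entry event queue needs 4KB, i.e. exactly
 * one entry.  buffer->index..buffer->index+7 are then the IDs that
 * efx_init_special_buffer() programs into the buffer table.
 */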

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/
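
/* Usage sketch (illustrative only, not part of the driver): callers
 * pair efx_nic_alloc_buffer() with efx_nic_free_buffer() around the
 * lifetime of a DMA-coherent scratch area, e.g. the interrupt status
 * word:
 *
 *	struct efx_buffer irq_status;
 *
 *	if (efx_nic_alloc_buffer(efx, &irq_status, sizeof(efx_oword_t)))
 *		return -ENOMEM;
 *	... point FR_AZ_INT_ADR_KER at irq_status.dma_addr ...
 *	efx_nic_free_buffer(efx, &irq_status);
 *
 * efx_nic_free_buffer() tolerates a buffer that was never allocated
 * (buffer->addr == NULL), so error paths can call it unconditionally.
 */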

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
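
/* Worked example (not in the original driver, and assuming, as in the
 * companion TX path, that empty_read_count is set to
 * read_count | EFX_EMPTY_COUNT_VALID when the queue is seen empty):
 * if the queue went empty at read_count == write_count == 42, then
 * (empty_read_count ^ 42) & ~EFX_EMPTY_COUNT_VALID == 0 and the first
 * descriptor may be pushed directly in the doorbell write, saving the
 * NIC a descriptor fetch.  Any intervening transmit changes the low
 * bits of write_count and the cheaper efx_notify_tx_desc() path is
 * used instead.
 */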

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
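
/* Worked example (not in the original driver, and assuming
 * EFX_BUF_SIZE == 4096): txd.entries counts 4KB buffer-table pages,
 * not descriptors.  A 512-entry ring of 8-byte descriptors fits in one
 * page, so __ffs(1) == 0 is written to FRF_AZ_TX_DESCQ_SIZE; a
 * 4096-entry ring spans 8 pages and encodes as __ffs(8) == 3.  The
 * field is thus log2(ring entries / 512), and the same encoding is
 * reused for the RX rings and event queues below.
 */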

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
666 * 667 **************************************************************************/ 668 669 /* Update a channel's event queue's read pointer (RPTR) register 670 * 671 * This writes the EVQ_RPTR_REG register for the specified channel's 672 * event queue. 673 */ 674 void efx_nic_eventq_read_ack(struct efx_channel *channel) 675 { 676 efx_dword_t reg; 677 struct efx_nic *efx = channel->efx; 678 679 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 680 channel->eventq_read_ptr & channel->eventq_mask); 681 efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, 682 channel->channel); 683 } 684 685 /* Use HW to insert a SW defined event */ 686 static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) 687 { 688 efx_oword_t drv_ev_reg; 689 690 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 691 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 692 drv_ev_reg.u32[0] = event->u32[0]; 693 drv_ev_reg.u32[1] = event->u32[1]; 694 drv_ev_reg.u32[2] = 0; 695 drv_ev_reg.u32[3] = 0; 696 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); 697 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); 698 } 699 700 static void efx_magic_event(struct efx_channel *channel, u32 magic) 701 { 702 efx_qword_t event; 703 704 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 705 FSE_AZ_EV_CODE_DRV_GEN_EV, 706 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 707 efx_generate_event(channel, &event); 708 } 709 710 /* Handle a transmit completion event 711 * 712 * The NIC batches TX completion events; the message we receive is of 713 * the form "complete all TX events up to this index". 714 */ 715 static int 716 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 717 { 718 unsigned int tx_ev_desc_ptr; 719 unsigned int tx_ev_q_label; 720 struct efx_tx_queue *tx_queue; 721 struct efx_nic *efx = channel->efx; 722 int tx_packets = 0; 723 724 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 725 /* Transmit completion */ 726 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 727 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 728 tx_queue = efx_channel_get_tx_queue( 729 channel, tx_ev_q_label % EFX_TXQ_TYPES); 730 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 731 tx_queue->ptr_mask); 732 channel->irq_mod_score += tx_packets; 733 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 734 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 735 /* Rewrite the FIFO write pointer */ 736 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 737 tx_queue = efx_channel_get_tx_queue( 738 channel, tx_ev_q_label % EFX_TXQ_TYPES); 739 740 netif_tx_lock(efx->net_dev); 741 efx_notify_tx_desc(tx_queue); 742 netif_tx_unlock(efx->net_dev); 743 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 744 EFX_WORKAROUND_10727(efx)) { 745 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 746 } else { 747 netif_err(efx, tx_err, efx->net_dev, 748 "channel %d unexpected TX event " 749 EFX_QWORD_FMT"\n", channel->channel, 750 EFX_QWORD_VAL(*event)); 751 } 752 753 return tx_packets; 754 } 755 756 /* Detect errors included in the rx_evt_pkt_ok bit. 

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned magic;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (magic == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
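
/* Worked example (not in the original driver): eventq_read_ptr is a
 * free-running 32-bit counter; only (read_ptr & eventq_mask) indexes
 * the ring, via efx_event() above.  With a 512-entry event queue
 * (eventq_mask == 511), read_ptr values 510, 511, 512, 513 map to
 * ring slots 510, 511, 0, 1, so the counter wraps through the ring
 * without an explicit reset, and efx_nic_eventq_read_ack() masks it
 * the same way before writing the RPTR register.
 */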

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_FILL(channel));
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/


static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}
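
/* Worked example (not in the original driver): the loop above gives a
 * flush up to EFX_FLUSH_POLL_COUNT * EFX_FLUSH_INTERVAL ==
 * 100 * 10ms == 1 second to complete before the queues are forcibly
 * marked FLUSH_DONE and -ETIMEDOUT triggers a reset.  The
 * EFX_RX_FLUSH_COUNT == 4 cap matches the hardware limit of four RX
 * flushes in flight, so remaining RX queues are (re)flushed on later
 * iterations as earlier ones complete or fail.
 */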

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
"disabling bus mastering" : "no recognised error"); 1364 1365 /* If this is a memory parity error dump which blocks are offending */ 1366 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1367 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1368 if (mem_perr) { 1369 efx_oword_t reg; 1370 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1371 netif_err(efx, hw, efx->net_dev, 1372 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1373 EFX_OWORD_VAL(reg)); 1374 } 1375 1376 /* Disable both devices */ 1377 pci_clear_master(efx->pci_dev); 1378 if (efx_nic_is_dual_func(efx)) 1379 pci_clear_master(nic_data->pci_dev2); 1380 efx_nic_disable_interrupts(efx); 1381 1382 /* Count errors and reset or disable the NIC accordingly */ 1383 if (efx->int_error_count == 0 || 1384 time_after(jiffies, efx->int_error_expire)) { 1385 efx->int_error_count = 0; 1386 efx->int_error_expire = 1387 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1388 } 1389 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1390 netif_err(efx, hw, efx->net_dev, 1391 "SYSTEM ERROR - reset scheduled\n"); 1392 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1393 } else { 1394 netif_err(efx, hw, efx->net_dev, 1395 "SYSTEM ERROR - max number of errors seen." 1396 "NIC will be disabled\n"); 1397 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1398 } 1399 1400 return IRQ_HANDLED; 1401 } 1402 1403 /* Handle a legacy interrupt 1404 * Acknowledges the interrupt and schedule event queue processing. 1405 */ 1406 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1407 { 1408 struct efx_nic *efx = dev_id; 1409 efx_oword_t *int_ker = efx->irq_status.addr; 1410 irqreturn_t result = IRQ_NONE; 1411 struct efx_channel *channel; 1412 efx_dword_t reg; 1413 u32 queues; 1414 int syserr; 1415 1416 /* Could this be ours? If interrupts are disabled then the 1417 * channel state may not be valid. 1418 */ 1419 if (!efx->legacy_irq_enabled) 1420 return result; 1421 1422 /* Read the ISR which also ACKs the interrupts */ 1423 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1424 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1425 1426 /* Handle non-event-queue sources */ 1427 if (queues & (1U << efx->irq_level)) { 1428 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1429 if (unlikely(syserr)) 1430 return efx_nic_fatal_interrupt(efx); 1431 efx->last_irq_cpu = raw_smp_processor_id(); 1432 } 1433 1434 if (queues != 0) { 1435 if (EFX_WORKAROUND_15783(efx)) 1436 efx->irq_zero_count = 0; 1437 1438 /* Schedule processing of any interrupting queues */ 1439 efx_for_each_channel(channel, efx) { 1440 if (queues & 1) 1441 efx_schedule_channel_irq(channel); 1442 queues >>= 1; 1443 } 1444 result = IRQ_HANDLED; 1445 1446 } else if (EFX_WORKAROUND_15783(efx)) { 1447 efx_qword_t *event; 1448 1449 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1450 * because this might be a shared interrupt. */ 1451 if (efx->irq_zero_count++ == 0) 1452 result = IRQ_HANDLED; 1453 1454 /* Ensure we schedule or rearm all event queues */ 1455 efx_for_each_channel(channel, efx) { 1456 event = efx_event(channel, channel->eventq_read_ptr); 1457 if (efx_event_present(event)) 1458 efx_schedule_channel_irq(channel); 1459 else 1460 efx_nic_eventq_read_ack(channel); 1461 } 1462 } 1463 1464 if (result == IRQ_HANDLED) 1465 netif_vdbg(efx, intr, efx->net_dev, 1466 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1467 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1468 1469 return result; 1470 } 1471 1472 /* Handle an MSI interrupt 1473 * 1474 * Handle an MSI hardware interrupt. 

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}


/* Set up the RSS indirection table.
 * This maps from the hash value of the packet to an RX queue.
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
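
/* Usage sketch (illustrative only, not part of the driver): the table
 * is typically filled with a round-robin spread over the RX queues
 * before being pushed, e.g.:
 *
 *	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 *		efx->rx_indir_table[i] = i % efx->n_rx_channels;
 *	efx_nic_push_rx_indir_table(efx);
 *
 * The hardware indexes the table with the low-order bits of the RSS
 * hash, so this spread balances flows across all RX queues.
 */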

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
			     efx->type->tx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
			     efx->type->rx_dc_base / 8);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
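
/* Worked example (not in the original driver): REGISTER_AB(NIC_STAT)
 * expands to { FR_AB_NIC_STAT, REGISTER_REVISION_A,
 * REGISTER_REVISION_B }, i.e. an initialiser recording the register's
 * offset together with the [A, B] span of hardware revisions that
 * implement it.  efx_nic_get_regs_len() and efx_nic_get_regs() below
 * compare efx->type->revision against that span to decide whether to
 * include the register in a dump.
 */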
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
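
/* REGISTER_TABLE_BB_CZ() deliberately emits two entries sharing one
 * offset and step but carrying different row counts, for tables whose
 * size differs between revision B and revisions C and later.
 * Schematically, REGISTER_TABLE_BB_CZ(TIMER_TBL) expands to:
 *
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 *	  FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS },
 *	{ FR_BZ_TIMER_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 *	  FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
 */
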
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
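
/* A minimal sketch of how the two routines above pair up in a caller such
 * as an ethtool get_regs handler.  This is a hypothetical example, not
 * part of the driver: it assumes <linux/slab.h> for kzalloc()/kfree(),
 * and a real caller would typically fill a buffer supplied by ethtool
 * rather than allocate its own.
 */
static void __maybe_unused efx_example_dump_regs(struct efx_nic *efx)
{
	size_t len = efx_nic_get_regs_len(efx);
	void *buf = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return;
	/* efx_nic_get_regs() fills exactly len bytes */
	efx_nic_get_regs(efx, buf);
	/* ... hand buf/len to userspace or a debug interface here ... */
	kfree(buf);
}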